/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		if (!netif_carrier_ok(adapter->netdev))
			netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		if (netif_carrier_ok(adapter->netdev))
			netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read and write into the ABI.
 * The general technique used here is: double word bitfields are defined in
 * opposite order for big endian architectures. Before the driver reads them,
 * the complete double word is translated using le32_to_cpu. Similarly, after
 * the driver writes into bitfields, cpu_to_le32 is used to translate the
 * double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * functions below.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}



#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


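/*
 * Unmap and free all tx buffers of the pkt whose EOP descriptor sits at
 * eop_idx, advancing tx_ring.next2comp past them. Returns the number of
 * tx descriptors reclaimed.
 */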
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


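/*
 * Reclaim finished tx descriptors by walking the tx completion ring, and
 * wake the queue if it was stopped and enough descriptors are free again.
 */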
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
				    sizeof(struct Vmxnet3_TxDesc),
				    tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		pci_free_consistent(adapter->pdev, tq->data_ring.size *
				    sizeof(struct Vmxnet3_TxDataDesc),
				    tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
				    sizeof(struct Vmxnet3_TxCompDesc),
				    tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	kfree(tq->buf_info);
	tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


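/*
 * Allocate the DMA-consistent tx ring, data ring and completion ring, plus
 * the buf_info array, for one tx queue. On failure, everything allocated so
 * far is released via vmxnet3_tq_destroy().
 */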
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
			   * sizeof(struct Vmxnet3_TxDesc),
			   &tq->tx_ring.basePA);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->data_ring.size *
			     sizeof(struct Vmxnet3_TxDataDesc),
			     &tq->data_ring.basePA);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
			     tq->comp_ring.size *
			     sizeof(struct Vmxnet3_TxCompDesc),
			     &tq->comp_ring.basePA);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
			       GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * Starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx descriptors. Stop after @num_to_alloc
 * buffers are allocated or allocation fails.
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = pci_map_single(adapter->pdev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = pci_map_page(adapter->pdev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


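/*
 * Attach the rx page buffer described by @rbi as the next page fragment of
 * the skb being assembled and account for its length.
 */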
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


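/*
 * Fill tx descriptors for the pkt: an optional SOP descriptor pointing into
 * the data ring for copied headers, then descriptors for the rest of the
 * linear part and for every page fragment. The SOP descriptor keeps the
 * previous gen bit; the caller flips it once the whole chain is set up.
 */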
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = pci_map_single(adapter->pdev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llu %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 * For a tso pkt, relevant headers are L2/3/4 including options
 * For a pkt requesting csum offloading, they are L2/3 and may include L4
 * if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
					     , skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


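/*
 * For TSO, clear the IP checksum and seed the TCP checksum with the
 * pseudo-header so the device can finish the per-segment checksums.
 */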
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

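/* Conservatively estimate the number of tx descriptors needed: one per
 * VMXNET3_MAX_TX_BUF_SIZE chunk of the linear part and of each page
 * fragment, plus one extra.
 */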
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}

/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
				      VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


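/* ndo_start_xmit handler: pick the tx queue from the skb's queue mapping
 * and hand the pkt to vmxnet3_tq_xmit().
 */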
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


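/*
 * Translate the checksum bits of the rx completion descriptor into
 * skb->ip_summed; if the device did not validate the checksum, leave it to
 * the stack.
 */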
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


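/*
 * Process up to @quota rx completion descriptors: hand completed pkts to
 * the stack, refill the rx rings with fresh buffers as descriptors are
 * consumed, and notify the device when new buffers are available.
 */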
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    rbi->len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
					 PCI_DMA_FROMDEVICE);

			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			rbi->skb = new_skb;
			rbi->dma_addr = pci_map_single(adapter->pdev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rcd->len) {
				pci_unmap_page(adapter->pdev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);
			}

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
					rq->buf_info[ring_idx][i].skb) {
				pci_unmap_single(adapter->pdev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
					rq->buf_info[ring_idx][i].page) {
				pci_unmap_page(adapter->pdev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	kfree(rq->buf_info[0]);

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
					    * sizeof(struct Vmxnet3_RxDesc),
					    rq->rx_ring[i].base,
					    rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
		rq->buf_info[i] = NULL;
	}

	if (rq->comp_ring.base) {
		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
				    sizeof(struct Vmxnet3_RxCompDesc),
				    rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}
}


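/*
 * Initialize buf_info for both rx rings (skb buffers for head descriptors,
 * page buffers for body descriptors), reset ring state and pre-allocate
 * the rx buffers.
 */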
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt is frag */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* at least has 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}


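/*
 * Allocate the DMA-consistent rx descriptor rings, the completion ring and
 * the shared buf_info array for one rx queue.
 */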
1493static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001494vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1495{
1496 int i;
1497 size_t sz;
1498 struct vmxnet3_rx_buf_info *bi;
1499
1500 for (i = 0; i < 2; i++) {
1501
1502 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1503 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1504 &rq->rx_ring[i].basePA);
1505 if (!rq->rx_ring[i].base) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001506 netdev_err(adapter->netdev,
1507 "failed to allocate rx ring %d\n", i);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001508 goto err;
1509 }
1510 }
1511
1512 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1513 rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1514 &rq->comp_ring.basePA);
1515 if (!rq->comp_ring.base) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001516 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001517 goto err;
1518 }
1519
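	/* a single allocation backs the buf_info arrays of both rings;
	 * ring 1's array starts right after ring 0's entries.
	 */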
1520 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1521 rq->rx_ring[1].size);
Julia Lawall476c6092010-05-13 10:05:40 +00001522 bi = kzalloc(sz, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +00001523 if (!bi)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001524 goto err;
Joe Perchese404dec2012-01-29 12:56:23 +00001525
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001526 rq->buf_info[0] = bi;
1527 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1528
1529 return 0;
1530
1531err:
1532 vmxnet3_rq_destroy(rq, adapter);
1533 return -ENOMEM;
1534}
1535
1536
1537static int
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001538vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1539{
1540 int i, err = 0;
1541
1542 for (i = 0; i < adapter->num_rx_queues; i++) {
1543 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1544 if (unlikely(err)) {
1545 dev_err(&adapter->netdev->dev,
1546				"%s: failed to create rx queue %i\n",
1547 adapter->netdev->name, i);
1548 goto err_out;
1549 }
1550 }
1551 return err;
1552err_out:
1553 vmxnet3_rq_destroy_all(adapter);
1554 return err;
1555
1556}
1557
1558/* Multiple queue aware polling function for tx and rx */
1559
1560static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001561vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1562{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001563 int rcd_done = 0, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001564 if (unlikely(adapter->shared->ecr))
1565 vmxnet3_process_events(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001566 for (i = 0; i < adapter->num_tx_queues; i++)
1567 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001568
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001569 for (i = 0; i < adapter->num_rx_queues; i++)
1570 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1571 adapter, budget);
1572 return rcd_done;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001573}
1574
1575
1576static int
1577vmxnet3_poll(struct napi_struct *napi, int budget)
1578{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001579 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1580 struct vmxnet3_rx_queue, napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001581 int rxd_done;
1582
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001583 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001584
1585 if (rxd_done < budget) {
1586 napi_complete(napi);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001587 vmxnet3_enable_all_intrs(rx_queue->adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001588 }
1589 return rxd_done;
1590}
1591
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001592/*
1593 * NAPI polling function for MSI-X mode with multiple Rx queues
1594 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
1595 */
1596
1597static int
1598vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1599{
1600 struct vmxnet3_rx_queue *rq = container_of(napi,
1601 struct vmxnet3_rx_queue, napi);
1602 struct vmxnet3_adapter *adapter = rq->adapter;
1603 int rxd_done;
1604
1605 /* When sharing interrupt with corresponding tx queue, process
1606 * tx completions in that queue as well
1607 */
1608 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1609 struct vmxnet3_tx_queue *tq =
1610 &adapter->tx_queue[rq - adapter->rx_queue];
1611 vmxnet3_tq_tx_complete(tq, adapter);
1612 }
1613
1614 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1615
1616 if (rxd_done < budget) {
1617 napi_complete(napi);
1618 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1619 }
1620 return rxd_done;
1621}
1622
1623
1624#ifdef CONFIG_PCI_MSI
1625
1626/*
1627 * Handle completion interrupts on tx queues
1628 * Returns whether or not the intr is handled
1629 */
1630
1631static irqreturn_t
1632vmxnet3_msix_tx(int irq, void *data)
1633{
1634 struct vmxnet3_tx_queue *tq = data;
1635 struct vmxnet3_adapter *adapter = tq->adapter;
1636
1637 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1638 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1639
1640		/* Handle the case where only one irq is allocated for all tx queues */
1641 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1642 int i;
1643 for (i = 0; i < adapter->num_tx_queues; i++) {
1644 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1645 vmxnet3_tq_tx_complete(txq, adapter);
1646 }
1647 } else {
1648 vmxnet3_tq_tx_complete(tq, adapter);
1649 }
1650 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1651
1652 return IRQ_HANDLED;
1653}
1654
1655
1656/*
1657 * Handle completion interrupts on rx queues. Returns whether or not the
1658 * intr is handled
1659 */
1660
1661static irqreturn_t
1662vmxnet3_msix_rx(int irq, void *data)
1663{
1664 struct vmxnet3_rx_queue *rq = data;
1665 struct vmxnet3_adapter *adapter = rq->adapter;
1666
1667 /* disable intr if needed */
1668 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1669 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1670 napi_schedule(&rq->napi);
1671
1672 return IRQ_HANDLED;
1673}
1674
1675/*
1676 *----------------------------------------------------------------------------
1677 *
1678 * vmxnet3_msix_event --
1679 *
1680 * vmxnet3 msix event intr handler
1681 *
1682 * Result:
1683 * whether or not the intr is handled
1684 *
1685 *----------------------------------------------------------------------------
1686 */
1687
1688static irqreturn_t
1689vmxnet3_msix_event(int irq, void *data)
1690{
1691 struct net_device *dev = data;
1692 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1693
1694 /* disable intr if needed */
1695 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1696 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1697
1698 if (adapter->shared->ecr)
1699 vmxnet3_process_events(adapter);
1700
1701 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1702
1703 return IRQ_HANDLED;
1704}
1705
1706#endif /* CONFIG_PCI_MSI */
1707
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001708
1709/* Interrupt handler for vmxnet3 */
1710static irqreturn_t
1711vmxnet3_intr(int irq, void *dev_id)
1712{
1713 struct net_device *dev = dev_id;
1714 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1715
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001716 if (adapter->intr.type == VMXNET3_IT_INTX) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001717 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1718 if (unlikely(icr == 0))
1719 /* not ours */
1720 return IRQ_NONE;
1721 }
1722
1723
1724 /* disable intr if needed */
1725 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001726 vmxnet3_disable_all_intrs(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001727
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001728 napi_schedule(&adapter->rx_queue[0].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001729
1730 return IRQ_HANDLED;
1731}
1732
1733#ifdef CONFIG_NET_POLL_CONTROLLER
1734
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001735/* netpoll callback. */
1736static void
1737vmxnet3_netpoll(struct net_device *netdev)
1738{
1739 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001740
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001741 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1742 vmxnet3_disable_all_intrs(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001743
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001744 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1745 vmxnet3_enable_all_intrs(adapter);
1746
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001747}
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001748#endif /* CONFIG_NET_POLL_CONTROLLER */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001749
1750static int
1751vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1752{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001753 struct vmxnet3_intr *intr = &adapter->intr;
1754 int err = 0, i;
1755 int vector = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001756
Randy Dunlap8f7e5242009-10-14 20:38:58 -07001757#ifdef CONFIG_PCI_MSI
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001758 if (adapter->intr.type == VMXNET3_IT_MSIX) {
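		/* MSI-X vectors are handed out in order: one per tx queue
		 * (or a single shared tx vector), then one per rx queue,
		 * and finally one for device events.
		 */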
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001759 for (i = 0; i < adapter->num_tx_queues; i++) {
1760 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1761 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1762 adapter->netdev->name, vector);
1763 err = request_irq(
1764 intr->msix_entries[vector].vector,
1765 vmxnet3_msix_tx, 0,
1766 adapter->tx_queue[i].name,
1767 &adapter->tx_queue[i]);
1768 } else {
1769 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1770 adapter->netdev->name, vector);
1771 }
1772 if (err) {
1773 dev_err(&adapter->netdev->dev,
1774 "Failed to request irq for MSIX, %s, "
1775 "error %d\n",
1776 adapter->tx_queue[i].name, err);
1777 return err;
1778 }
1779
1780 /* Handle the case where only 1 MSIx was allocated for
1781 * all tx queues */
1782 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1783 for (; i < adapter->num_tx_queues; i++)
1784 adapter->tx_queue[i].comp_ring.intr_idx
1785 = vector;
1786 vector++;
1787 break;
1788 } else {
1789 adapter->tx_queue[i].comp_ring.intr_idx
1790 = vector++;
1791 }
1792 }
1793 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1794 vector = 0;
1795
1796 for (i = 0; i < adapter->num_rx_queues; i++) {
1797 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1798 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1799 adapter->netdev->name, vector);
1800 else
1801 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1802 adapter->netdev->name, vector);
1803 err = request_irq(intr->msix_entries[vector].vector,
1804 vmxnet3_msix_rx, 0,
1805 adapter->rx_queue[i].name,
1806 &(adapter->rx_queue[i]));
1807 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001808 netdev_err(adapter->netdev,
1809 "Failed to request irq for MSIX, "
1810 "%s, error %d\n",
1811 adapter->rx_queue[i].name, err);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001812 return err;
1813 }
1814
1815 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1816 }
1817
1818 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1819 adapter->netdev->name, vector);
1820 err = request_irq(intr->msix_entries[vector].vector,
1821 vmxnet3_msix_event, 0,
1822 intr->event_msi_vector_name, adapter->netdev);
1823 intr->event_intr_idx = vector;
1824
1825 } else if (intr->type == VMXNET3_IT_MSI) {
1826 adapter->num_rx_queues = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001827 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1828 adapter->netdev->name, adapter->netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001829 } else {
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00001830#endif
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001831 adapter->num_rx_queues = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001832 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1833 IRQF_SHARED, adapter->netdev->name,
1834 adapter->netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001835#ifdef CONFIG_PCI_MSI
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001836 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001837#endif
1838 intr->num_intrs = vector + 1;
1839 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001840 netdev_err(adapter->netdev,
1841 "Failed to request irq (intr type:%d), error %d\n",
1842 intr->type, err);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001843 } else {
1844 /* Number of rx queues will not change after this */
1845 for (i = 0; i < adapter->num_rx_queues; i++) {
1846 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1847 rq->qid = i;
1848 rq->qid2 = i + adapter->num_rx_queues;
1849 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001850
1851
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001852
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001853 /* init our intr settings */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001854 for (i = 0; i < intr->num_intrs; i++)
1855 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1856 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1857 adapter->intr.event_intr_idx = 0;
1858 for (i = 0; i < adapter->num_tx_queues; i++)
1859 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1860 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1861 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001862
Stephen Hemminger204a6e62013-01-15 07:28:30 +00001863 netdev_info(adapter->netdev,
1864 "intr type %u, mode %u, %u vectors allocated\n",
1865 intr->type, intr->mask_mode, intr->num_intrs);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001866 }
1867
1868 return err;
1869}
1870
1871
1872static void
1873vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1874{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001875 struct vmxnet3_intr *intr = &adapter->intr;
1876 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001877
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001878 switch (intr->type) {
Randy Dunlap8f7e5242009-10-14 20:38:58 -07001879#ifdef CONFIG_PCI_MSI
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001880 case VMXNET3_IT_MSIX:
1881 {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001882 int i, vector = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001883
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00001884 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1885 for (i = 0; i < adapter->num_tx_queues; i++) {
1886 free_irq(intr->msix_entries[vector++].vector,
1887 &(adapter->tx_queue[i]));
1888 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1889 break;
1890 }
1891 }
1892
1893 for (i = 0; i < adapter->num_rx_queues; i++) {
1894 free_irq(intr->msix_entries[vector++].vector,
1895 &(adapter->rx_queue[i]));
1896 }
1897
1898 free_irq(intr->msix_entries[vector].vector,
1899 adapter->netdev);
1900 BUG_ON(vector >= intr->num_intrs);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001901 break;
1902 }
Randy Dunlap8f7e5242009-10-14 20:38:58 -07001903#endif
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001904 case VMXNET3_IT_MSI:
1905 free_irq(adapter->pdev->irq, adapter->netdev);
1906 break;
1907 case VMXNET3_IT_INTX:
1908 free_irq(adapter->pdev->irq, adapter->netdev);
1909 break;
1910 default:
Sasha Levinc068e772012-11-08 10:23:03 +00001911 BUG();
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001912 }
1913}
1914
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001915
1916static void
1917vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1918{
Jesse Gross72e85c42011-06-23 13:04:39 +00001919 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1920 u16 vid;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001921
Jesse Gross72e85c42011-06-23 13:04:39 +00001922 /* allow untagged pkts */
1923 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1924
1925 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1926 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001927}
1928
1929
Jiri Pirko8e586132011-12-08 19:52:37 -05001930static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001931vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1932{
1933 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001934
Jesse Grossf6957f82011-08-07 23:15:47 +00001935 if (!(netdev->flags & IFF_PROMISC)) {
1936 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1937 unsigned long flags;
1938
1939 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1940 spin_lock_irqsave(&adapter->cmd_lock, flags);
1941 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1942 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1943 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1944 }
Jesse Gross72e85c42011-06-23 13:04:39 +00001945
1946 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001947
1948 return 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001949}
1950
1951
Jiri Pirko8e586132011-12-08 19:52:37 -05001952static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001953vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1954{
1955 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001956
Jesse Grossf6957f82011-08-07 23:15:47 +00001957 if (!(netdev->flags & IFF_PROMISC)) {
1958 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1959 unsigned long flags;
1960
1961 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1962 spin_lock_irqsave(&adapter->cmd_lock, flags);
1963 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1964 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1965 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1966 }
Jesse Gross72e85c42011-06-23 13:04:39 +00001967
1968 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001969
1970 return 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001971}
1972
1973
1974static u8 *
1975vmxnet3_copy_mc(struct net_device *netdev)
1976{
1977 u8 *buf = NULL;
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001978 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001979
1980 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1981 if (sz <= 0xffff) {
1982 /* We may be called with BH disabled */
1983 buf = kmalloc(sz, GFP_ATOMIC);
1984 if (buf) {
Jiri Pirko22bedad32010-04-01 21:22:57 +00001985 struct netdev_hw_addr *ha;
Jiri Pirko567ec872010-02-23 23:17:07 +00001986 int i = 0;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001987
Jiri Pirko22bedad32010-04-01 21:22:57 +00001988 netdev_for_each_mc_addr(ha, netdev)
1989 memcpy(buf + i++ * ETH_ALEN, ha->addr,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001990 ETH_ALEN);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07001991 }
1992 }
1993 return buf;
1994}
1995
1996
1997static void
1998vmxnet3_set_mc(struct net_device *netdev)
1999{
2000 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002001 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002002 struct Vmxnet3_RxFilterConf *rxConf =
2003 &adapter->shared->devRead.rxFilterConf;
2004 u8 *new_table = NULL;
2005 u32 new_mode = VMXNET3_RXM_UCAST;
2006
Jesse Gross72e85c42011-06-23 13:04:39 +00002007 if (netdev->flags & IFF_PROMISC) {
2008 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2009 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2010
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002011 new_mode |= VMXNET3_RXM_PROMISC;
Jesse Gross72e85c42011-06-23 13:04:39 +00002012 } else {
2013 vmxnet3_restore_vlan(adapter);
2014 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002015
2016 if (netdev->flags & IFF_BROADCAST)
2017 new_mode |= VMXNET3_RXM_BCAST;
2018
2019 if (netdev->flags & IFF_ALLMULTI)
2020 new_mode |= VMXNET3_RXM_ALL_MULTI;
2021 else
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002022 if (!netdev_mc_empty(netdev)) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002023 new_table = vmxnet3_copy_mc(netdev);
2024 if (new_table) {
2025 new_mode |= VMXNET3_RXM_MCAST;
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002026 rxConf->mfTableLen = cpu_to_le16(
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002027 netdev_mc_count(netdev) * ETH_ALEN);
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002028 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
2029 new_table));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002030 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002031 netdev_info(netdev, "failed to copy mcast list"
2032 ", setting ALL_MULTI\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002033 new_mode |= VMXNET3_RXM_ALL_MULTI;
2034 }
2035 }
2036
2037
2038 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2039 rxConf->mfTableLen = 0;
2040 rxConf->mfTablePA = 0;
2041 }
2042
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002043 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002044 if (new_mode != rxConf->rxMode) {
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002045 rxConf->rxMode = cpu_to_le32(new_mode);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002046 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2047 VMXNET3_CMD_UPDATE_RX_MODE);
Jesse Gross72e85c42011-06-23 13:04:39 +00002048 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2049 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002050 }
2051
2052 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2053 VMXNET3_CMD_UPDATE_MAC_FILTERS);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002054 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002055
2056 kfree(new_table);
2057}
2058
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002059void
2060vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2061{
2062 int i;
2063
2064 for (i = 0; i < adapter->num_rx_queues; i++)
2065 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2066}
2067
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002068
2069/*
2070 * Set up driver_shared based on settings in adapter.
2071 */
2072
2073static void
2074vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2075{
2076 struct Vmxnet3_DriverShared *shared = adapter->shared;
2077 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2078 struct Vmxnet3_TxQueueConf *tqc;
2079 struct Vmxnet3_RxQueueConf *rqc;
2080 int i;
2081
2082 memset(shared, 0, sizeof(*shared));
2083
2084 /* driver settings */
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002085 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2086 devRead->misc.driverInfo.version = cpu_to_le32(
2087 VMXNET3_DRIVER_VERSION_NUM);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002088 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2089 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2090 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
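	/* the gos fields share a single packed 32-bit word; byte-swap the
	 * whole word for the little-endian device in one shot.
	 */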
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002091 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2092 *((u32 *)&devRead->misc.driverInfo.gos));
2093 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2094 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002095
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002096 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
2097 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002098
2099 /* set up feature flags */
Michał Mirosława0d27302011-04-18 13:31:21 +00002100 if (adapter->netdev->features & NETIF_F_RXCSUM)
Harvey Harrison3843e512010-10-21 18:05:32 +00002101 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002102
Michał Mirosława0d27302011-04-18 13:31:21 +00002103 if (adapter->netdev->features & NETIF_F_LRO) {
Harvey Harrison3843e512010-10-21 18:05:32 +00002104 devRead->misc.uptFeatures |= UPT1_F_LRO;
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002105 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002106 }
Shreyas Bhatewara54da3d02011-01-14 14:59:36 +00002107 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
Harvey Harrison3843e512010-10-21 18:05:32 +00002108 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002109
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002110 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2111 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2112 devRead->misc.queueDescLen = cpu_to_le32(
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002113 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2114 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002115
2116 /* tx queue settings */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002117 devRead->misc.numTxQueues = adapter->num_tx_queues;
2118 for (i = 0; i < adapter->num_tx_queues; i++) {
2119 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2120 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2121 tqc = &adapter->tqd_start[i].conf;
2122 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2123 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2124 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2125 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
2126 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2127 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2128 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2129 tqc->ddLen = cpu_to_le32(
2130 sizeof(struct vmxnet3_tx_buf_info) *
2131 tqc->txRingSize);
2132 tqc->intrIdx = tq->comp_ring.intr_idx;
2133 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002134
2135 /* rx queue settings */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002136 devRead->misc.numRxQueues = adapter->num_rx_queues;
2137 for (i = 0; i < adapter->num_rx_queues; i++) {
2138 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2139 rqc = &adapter->rqd_start[i].conf;
2140 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2141 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2142 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2143 rqc->ddPA = cpu_to_le64(virt_to_phys(
2144 rq->buf_info));
2145 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2146 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2147 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2148 rqc->ddLen = cpu_to_le32(
2149 sizeof(struct vmxnet3_rx_buf_info) *
2150 (rqc->rxRingSize[0] +
2151 rqc->rxRingSize[1]));
2152 rqc->intrIdx = rq->comp_ring.intr_idx;
2153 }
2154
2155#ifdef VMXNET3_RSS
2156 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2157
2158 if (adapter->rss) {
2159 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
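		/* fixed key for the Toeplitz RSS hash configured below */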
Stephen Hemminger66d35912013-01-15 07:28:34 +00002160 static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
2161 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
2162 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
2163 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
2164 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
2165 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
2166 };
2167
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002168 devRead->misc.uptFeatures |= UPT1_F_RSS;
2169 devRead->misc.numRxQueues = adapter->num_rx_queues;
2170 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2171 UPT1_RSS_HASH_TYPE_IPV4 |
2172 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2173 UPT1_RSS_HASH_TYPE_IPV6;
2174 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2175 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2176 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
Stephen Hemminger66d35912013-01-15 07:28:34 +00002177 memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));
2178
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002179 for (i = 0; i < rssConf->indTableSize; i++)
Ben Hutchings278bc422011-12-15 13:56:49 +00002180 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2181 i, adapter->num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002182
2183 devRead->rssConfDesc.confVer = 1;
2184 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2185 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2186 }
2187
2188#endif /* VMXNET3_RSS */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002189
2190 /* intr settings */
2191 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2192 VMXNET3_IMM_AUTO;
2193 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2194 for (i = 0; i < adapter->intr.num_intrs; i++)
2195 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2196
2197 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
Ronghua Zang6929fe82010-07-15 22:18:47 -07002198 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002199
2200 /* rx filter settings */
2201 devRead->rxFilterConf.rxMode = 0;
2202 vmxnet3_restore_vlan(adapter);
Shreyas Bhatewaraf9f25022011-01-14 14:59:31 +00002203 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2204
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002205 /* the rest are already zeroed */
2206}
2207
2208
2209int
2210vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2211{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002212 int err, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002213 u32 ret;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002214 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002215
Stephen Hemmingerfdcd79b2013-01-15 07:28:29 +00002216 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002217 " ring sizes %u %u %u\n", adapter->netdev->name,
2218 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2219 adapter->tx_queue[0].tx_ring.size,
2220 adapter->rx_queue[0].rx_ring[0].size,
2221 adapter->rx_queue[0].rx_ring[1].size);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002222
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002223 vmxnet3_tq_init_all(adapter);
2224 err = vmxnet3_rq_init_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002225 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002226 netdev_err(adapter->netdev,
2227 "Failed to init rx queue error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002228 goto rq_err;
2229 }
2230
2231 err = vmxnet3_request_irqs(adapter);
2232 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002233 netdev_err(adapter->netdev,
2234 "Failed to setup irq for error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002235 goto irq_err;
2236 }
2237
2238 vmxnet3_setup_driver_shared(adapter);
2239
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00002240 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2241 adapter->shared_pa));
2242 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2243 adapter->shared_pa));
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002244 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002245 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2246 VMXNET3_CMD_ACTIVATE_DEV);
2247 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002248 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002249
2250 if (ret != 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002251 netdev_err(adapter->netdev,
2252 "Failed to activate dev: error %u\n", ret);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002253 err = -EINVAL;
2254 goto activate_err;
2255 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002256
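	/* publish the initial fill level of both rx rings to the device */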
2257 for (i = 0; i < adapter->num_rx_queues; i++) {
2258 VMXNET3_WRITE_BAR0_REG(adapter,
2259 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2260 adapter->rx_queue[i].rx_ring[0].next2fill);
2261 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2262 (i * VMXNET3_REG_ALIGN)),
2263 adapter->rx_queue[i].rx_ring[1].next2fill);
2264 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002265
2266	/* Apply the rx filter settings last. */
2267 vmxnet3_set_mc(adapter->netdev);
2268
2269 /*
2270 * Check link state when first activating device. It will start the
2271 * tx queue if the link is up.
2272 */
Shreyas Bhatewara4a1745fc2010-07-15 21:51:14 +00002273 vmxnet3_check_link(adapter, true);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002274 for (i = 0; i < adapter->num_rx_queues; i++)
2275 napi_enable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002276 vmxnet3_enable_all_intrs(adapter);
2277 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2278 return 0;
2279
2280activate_err:
2281 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2282 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2283 vmxnet3_free_irqs(adapter);
2284irq_err:
2285rq_err:
2286 /* free up buffers we allocated */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002287 vmxnet3_rq_cleanup_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002288 return err;
2289}
2290
2291
2292void
2293vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2294{
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002295 unsigned long flags;
2296 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002297 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002298 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002299}
2300
2301
2302int
2303vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2304{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002305 int i;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002306 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002307 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2308 return 0;
2309
2310
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002311 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002312 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2313 VMXNET3_CMD_QUIESCE_DEV);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002314 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002315 vmxnet3_disable_all_intrs(adapter);
2316
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002317 for (i = 0; i < adapter->num_rx_queues; i++)
2318 napi_disable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002319 netif_tx_disable(adapter->netdev);
2320 adapter->link_speed = 0;
2321 netif_carrier_off(adapter->netdev);
2322
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002323 vmxnet3_tq_cleanup_all(adapter);
2324 vmxnet3_rq_cleanup_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002325 vmxnet3_free_irqs(adapter);
2326 return 0;
2327}
2328
2329
2330static void
2331vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2332{
2333 u32 tmp;
2334
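	/* MACL takes the first four bytes of the MAC address, MACH the last two */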
2335 tmp = *(u32 *)mac;
2336 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2337
2338 tmp = (mac[5] << 8) | mac[4];
2339 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2340}
2341
2342
2343static int
2344vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2345{
2346 struct sockaddr *addr = p;
2347 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2348
2349 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2350 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2351
2352 return 0;
2353}
2354
2355
2356/* ==================== initialization and cleanup routines ============ */
2357
2358static int
2359vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2360{
2361 int err;
2362 unsigned long mmio_start, mmio_len;
2363 struct pci_dev *pdev = adapter->pdev;
2364
2365 err = pci_enable_device(pdev);
2366 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002367 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002368 return err;
2369 }
2370
2371 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2372 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002373 dev_err(&pdev->dev,
2374 "pci_set_consistent_dma_mask failed\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002375 err = -EIO;
2376 goto err_set_mask;
2377 }
2378 *dma64 = true;
2379 } else {
2380 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002381 dev_err(&pdev->dev,
2382 "pci_set_dma_mask failed\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002383 err = -EIO;
2384 goto err_set_mask;
2385 }
2386 *dma64 = false;
2387 }
2388
2389 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2390 vmxnet3_driver_name);
2391 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002392 dev_err(&pdev->dev,
2393 "Failed to request region for adapter: error %d\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002394 goto err_set_mask;
2395 }
2396
2397 pci_set_master(pdev);
2398
2399 mmio_start = pci_resource_start(pdev, 0);
2400 mmio_len = pci_resource_len(pdev, 0);
2401 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2402 if (!adapter->hw_addr0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002403 dev_err(&pdev->dev, "Failed to map bar0\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002404 err = -EIO;
2405 goto err_ioremap;
2406 }
2407
2408 mmio_start = pci_resource_start(pdev, 1);
2409 mmio_len = pci_resource_len(pdev, 1);
2410 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2411 if (!adapter->hw_addr1) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002412 dev_err(&pdev->dev, "Failed to map bar1\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002413 err = -EIO;
2414 goto err_bar1;
2415 }
2416 return 0;
2417
2418err_bar1:
2419 iounmap(adapter->hw_addr0);
2420err_ioremap:
2421 pci_release_selected_regions(pdev, (1 << 2) - 1);
2422err_set_mask:
2423 pci_disable_device(pdev);
2424 return err;
2425}
2426
2427
2428static void
2429vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2430{
2431 BUG_ON(!adapter->pdev);
2432
2433 iounmap(adapter->hw_addr0);
2434 iounmap(adapter->hw_addr1);
2435 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2436 pci_disable_device(adapter->pdev);
2437}
2438
2439
2440static void
2441vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2442{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002443 size_t sz, i, ring0_size, ring1_size, comp_size;
2444 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2445
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002446
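	/* rx_buf_per_pkt is the number of ring-0 descriptors one packet
	 * consumes: a single skb buffer when the MTU fits in it, otherwise
	 * one skb buffer plus enough page-sized buffers for the remainder.
	 */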
2447 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2448 VMXNET3_MAX_ETH_HDR_SIZE) {
2449 adapter->skb_buf_size = adapter->netdev->mtu +
2450 VMXNET3_MAX_ETH_HDR_SIZE;
2451 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2452 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2453
2454 adapter->rx_buf_per_pkt = 1;
2455 } else {
2456 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2457 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2458 VMXNET3_MAX_ETH_HDR_SIZE;
2459 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2460 }
2461
2462 /*
2463 * for simplicity, force the ring0 size to be a multiple of
2464 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2465 */
2466 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002467 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2468 ring0_size = (ring0_size + sz - 1) / sz * sz;
Shreyas Bhatewaraa53255d2011-01-14 14:59:25 +00002469 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002470 sz * sz);
2471 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2472 comp_size = ring0_size + ring1_size;
2473
2474 for (i = 0; i < adapter->num_rx_queues; i++) {
2475 rq = &adapter->rx_queue[i];
2476 rq->rx_ring[0].size = ring0_size;
2477 rq->rx_ring[1].size = ring1_size;
2478 rq->comp_ring.size = comp_size;
2479 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002480}
2481
2482
2483int
2484vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2485 u32 rx_ring_size, u32 rx_ring2_size)
2486{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002487 int err = 0, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002488
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002489 for (i = 0; i < adapter->num_tx_queues; i++) {
2490 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2491 tq->tx_ring.size = tx_ring_size;
2492 tq->data_ring.size = tx_ring_size;
2493 tq->comp_ring.size = tx_ring_size;
2494 tq->shared = &adapter->tqd_start[i].ctrl;
2495 tq->stopped = true;
2496 tq->adapter = adapter;
2497 tq->qid = i;
2498 err = vmxnet3_tq_create(tq, adapter);
2499 /*
2500		 * Too late to change num_tx_queues. We cannot fall back to
2501		 * fewer queues than what we asked for.
2502 */
2503 if (err)
2504 goto queue_err;
2505 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002506
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002507 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2508 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002509 vmxnet3_adjust_rx_ring_size(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002510 for (i = 0; i < adapter->num_rx_queues; i++) {
2511 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2512 /* qid and qid2 for rx queues will be assigned later when num
2513 * of rx queues is finalized after allocating intrs */
2514 rq->shared = &adapter->rqd_start[i].ctrl;
2515 rq->adapter = adapter;
2516 err = vmxnet3_rq_create(rq, adapter);
2517 if (err) {
2518 if (i == 0) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002519 netdev_err(adapter->netdev,
2520 "Could not allocate any rx queues. "
2521 "Aborting.\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002522 goto queue_err;
2523 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002524 netdev_info(adapter->netdev,
2525 "Number of rx queues changed "
2526 "to : %d.\n", i);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002527 adapter->num_rx_queues = i;
2528 err = 0;
2529 break;
2530 }
2531 }
2532 }
2533 return err;
2534queue_err:
2535 vmxnet3_tq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002536 return err;
2537}
2538
2539static int
2540vmxnet3_open(struct net_device *netdev)
2541{
2542 struct vmxnet3_adapter *adapter;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002543 int err, i;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002544
2545 adapter = netdev_priv(netdev);
2546
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002547 for (i = 0; i < adapter->num_tx_queues; i++)
2548 spin_lock_init(&adapter->tx_queue[i].tx_lock);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002549
2550 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2551 VMXNET3_DEF_RX_RING_SIZE,
2552 VMXNET3_DEF_RX_RING_SIZE);
2553 if (err)
2554 goto queue_err;
2555
2556 err = vmxnet3_activate_dev(adapter);
2557 if (err)
2558 goto activate_err;
2559
2560 return 0;
2561
2562activate_err:
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002563 vmxnet3_rq_destroy_all(adapter);
2564 vmxnet3_tq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002565queue_err:
2566 return err;
2567}
2568
2569
2570static int
2571vmxnet3_close(struct net_device *netdev)
2572{
2573 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2574
2575 /*
2576 * Reset_work may be in the middle of resetting the device, wait for its
2577 * completion.
2578 */
2579 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2580 msleep(1);
2581
2582 vmxnet3_quiesce_dev(adapter);
2583
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002584 vmxnet3_rq_destroy_all(adapter);
2585 vmxnet3_tq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002586
2587 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2588
2589
2590 return 0;
2591}
2592
2593
2594void
2595vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2596{
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002597 int i;
2598
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002599 /*
2600 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2601 * vmxnet3_close() will deadlock.
2602 */
2603 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2604
2605 /* we need to enable NAPI, otherwise dev_close will deadlock */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002606 for (i = 0; i < adapter->num_rx_queues; i++)
2607 napi_enable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002608 dev_close(adapter->netdev);
2609}
2610
2611
2612static int
2613vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2614{
2615 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2616 int err = 0;
2617
2618 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2619 return -EINVAL;
2620
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002621 netdev->mtu = new_mtu;
2622
2623 /*
2624 * Reset_work may be in the middle of resetting the device, wait for its
2625 * completion.
2626 */
2627 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2628 msleep(1);
2629
2630 if (netif_running(netdev)) {
2631 vmxnet3_quiesce_dev(adapter);
2632 vmxnet3_reset_dev(adapter);
2633
2634 /* we need to re-create the rx queue based on the new mtu */
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002635 vmxnet3_rq_destroy_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002636 vmxnet3_adjust_rx_ring_size(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002637 err = vmxnet3_rq_create_all(adapter);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002638 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002639 netdev_err(netdev,
2640 "failed to re-create rx queues, "
2641				   "error %d. Closing it.\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002642 goto out;
2643 }
2644
2645 err = vmxnet3_activate_dev(adapter);
2646 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002647 netdev_err(netdev,
2648 "failed to re-activate, error %d. "
2649				   "Closing it.\n", err);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002650 goto out;
2651 }
2652 }
2653
2654out:
2655 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2656 if (err)
2657 vmxnet3_force_close(adapter);
2658
2659 return err;
2660}
2661
2662
2663static void
2664vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2665{
2666 struct net_device *netdev = adapter->netdev;
2667
Michał Mirosława0d27302011-04-18 13:31:21 +00002668 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2669 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
Jesse Gross72e85c42011-06-23 13:04:39 +00002670 NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
2671 NETIF_F_LRO;
Michał Mirosława0d27302011-04-18 13:31:21 +00002672 if (dma64)
Shreyas Bhatewaraebbf9292011-07-20 17:21:51 +00002673 netdev->hw_features |= NETIF_F_HIGHDMA;
Jesse Gross72e85c42011-06-23 13:04:39 +00002674 netdev->vlan_features = netdev->hw_features &
2675 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2676 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002677}
2678
2679
2680static void
2681vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2682{
2683 u32 tmp;
2684
2685 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2686 *(u32 *)mac = tmp;
2687
2688 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2689 mac[4] = tmp & 0xff;
2690 mac[5] = (tmp >> 8) & 0xff;
2691}
2692
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002693#ifdef CONFIG_PCI_MSI
2694
2695/*
2696 * Enable MSI-X vectors.
2697 * Returns:
2698 * 0 on successful enabling of the required number of vectors,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002699 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002700 * vectors could be enabled, or
2701 * the number of vectors which could be enabled otherwise (this number is
2702 * smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
2703 */
2704
2705static int
2706vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2707 int vectors)
2708{
2709 int err = 0, vector_threshold;
2710 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2711
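	/* pci_enable_msix() returns 0 on success, a negative errno on a hard
	 * failure, or the number of vectors actually available; on a partial
	 * failure fall back to the minimum count the driver can work with.
	 */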
2712 while (vectors >= vector_threshold) {
2713 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2714 vectors);
2715 if (!err) {
2716 adapter->intr.num_intrs = vectors;
2717 return 0;
2718 } else if (err < 0) {
Stephen Hemminger4bad25f2013-01-15 07:28:28 +00002719 dev_err(&adapter->netdev->dev,
Shreyas Bhatewara4c1dc802012-02-28 22:08:39 +00002720 "Failed to enable MSI-X, error: %d\n", err);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002721 vectors = 0;
2722 } else if (err < vector_threshold) {
2723 break;
2724 } else {
2725			/* If we fail to enable the required number of MSI-X
Shreyas Bhatewara7e96fbf2011-01-14 15:00:03 +00002726			 * vectors, try enabling the minimum number required.
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002727 */
Stephen Hemminger4bad25f2013-01-15 07:28:28 +00002728 dev_err(&adapter->netdev->dev,
2729 "Failed to enable %d MSI-X, trying %d instead\n",
Shreyas Bhatewara4c1dc802012-02-28 22:08:39 +00002730 vectors, vector_threshold);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002731 vectors = vector_threshold;
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002732 }
2733 }
2734
Stephen Hemminger4bad25f2013-01-15 07:28:28 +00002735 dev_info(&adapter->pdev->dev,
2736 "Number of MSI-X interrupts which can be allocated "
2737 "is lower than min threshold required.\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002738 return err;
2739}
2740
2741
2742#endif /* CONFIG_PCI_MSI */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002743
2744static void
2745vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2746{
2747 u32 cfg;
Roland Dreiere328d412011-05-06 08:32:53 +00002748 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002749
2750 /* intr settings */
Roland Dreiere328d412011-05-06 08:32:53 +00002751 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002752 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2753 VMXNET3_CMD_GET_CONF_INTR);
2754 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
Roland Dreiere328d412011-05-06 08:32:53 +00002755 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002756 adapter->intr.type = cfg & 0x3;
2757 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2758
2759 if (adapter->intr.type == VMXNET3_IT_AUTO) {
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00002760 adapter->intr.type = VMXNET3_IT_MSIX;
2761 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002762
Randy Dunlap8f7e5242009-10-14 20:38:58 -07002763#ifdef CONFIG_PCI_MSI
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00002764 if (adapter->intr.type == VMXNET3_IT_MSIX) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002765 int vector, err = 0;
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00002766
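		/* vectors needed: one per tx queue (or a single shared tx
		 * vector), one per rx queue unless rx buddy-shares the tx
		 * vectors, plus one for device events; never fewer than
		 * VMXNET3_LINUX_MIN_MSIX_VECT.
		 */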
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002767 adapter->intr.num_intrs = (adapter->share_intr ==
2768 VMXNET3_INTR_TXSHARE) ? 1 :
2769 adapter->num_tx_queues;
2770 adapter->intr.num_intrs += (adapter->share_intr ==
2771 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2772 adapter->num_rx_queues;
2773 adapter->intr.num_intrs += 1; /* for link event */
2774
2775 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2776 VMXNET3_LINUX_MIN_MSIX_VECT
2777 ? adapter->intr.num_intrs :
2778 VMXNET3_LINUX_MIN_MSIX_VECT);
2779
2780 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2781 adapter->intr.msix_entries[vector].entry = vector;
2782
2783 err = vmxnet3_acquire_msix_vectors(adapter,
2784 adapter->intr.num_intrs);
2785 /* If we cannot allocate one MSIx vector per queue
2786 * then limit the number of rx queues to 1
2787 */
2788 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2789 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
Shreyas Bhatewara7e96fbf2011-01-14 15:00:03 +00002790 || adapter->num_rx_queues != 1) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002791 adapter->share_intr = VMXNET3_INTR_TXSHARE;
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002792 netdev_err(adapter->netdev,
2793 "Number of rx queues : 1\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002794 adapter->num_rx_queues = 1;
2795 adapter->intr.num_intrs =
2796 VMXNET3_LINUX_MIN_MSIX_VECT;
2797 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002798 return;
2799 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002800 if (!err)
2801 return;
2802
2803 /* If we cannot allocate MSIx vectors use only one rx queue */
Stephen Hemminger4bad25f2013-01-15 07:28:28 +00002804 dev_info(&adapter->pdev->dev,
2805 "Failed to enable MSI-X, error %d. "
2806 "Limiting #rx queues to 1, try MSI.\n", err);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002807
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00002808 adapter->intr.type = VMXNET3_IT_MSI;
2809 }
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002810
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00002811 if (adapter->intr.type == VMXNET3_IT_MSI) {
2812 int err;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002813 err = pci_enable_msi(adapter->pdev);
2814 if (!err) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002815 adapter->num_rx_queues = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002816 adapter->intr.num_intrs = 1;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002817 return;
2818 }
2819 }
Shreyas Bhatewara0bdc0d72010-07-15 15:21:27 +00002820#endif /* CONFIG_PCI_MSI */
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002821
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002822 adapter->num_rx_queues = 1;
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002823 dev_info(&adapter->netdev->dev,
2824 "Using INTx interrupt, #Rx queues: 1.\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002825 adapter->intr.type = VMXNET3_IT_INTX;
2826
2827 /* INT-X related setting */
2828 adapter->intr.num_intrs = 1;
2829}
2830
2831
2832static void
2833vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2834{
2835 if (adapter->intr.type == VMXNET3_IT_MSIX)
2836 pci_disable_msix(adapter->pdev);
2837 else if (adapter->intr.type == VMXNET3_IT_MSI)
2838 pci_disable_msi(adapter->pdev);
2839 else
2840 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2841}
2842
2843
2844static void
2845vmxnet3_tx_timeout(struct net_device *netdev)
2846{
2847 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2848 adapter->tx_timeout_count++;
2849
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002850 netdev_err(adapter->netdev, "tx hang\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002851 schedule_work(&adapter->work);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002852 netif_wake_queue(adapter->netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002853}
2854
2855
2856static void
2857vmxnet3_reset_work(struct work_struct *data)
2858{
2859 struct vmxnet3_adapter *adapter;
2860
2861 adapter = container_of(data, struct vmxnet3_adapter, work);
2862
2863 /* if another thread is resetting the device, no need to proceed */
2864 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2865 return;
2866
2867 /* if the device is closed, we must leave it alone */
Shreyas Bhatewarad9a5f212010-07-19 07:02:13 +00002868 rtnl_lock();
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002869 if (netif_running(adapter->netdev)) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002870 netdev_notice(adapter->netdev, "resetting\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002871 vmxnet3_quiesce_dev(adapter);
2872 vmxnet3_reset_dev(adapter);
2873 vmxnet3_activate_dev(adapter);
2874 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002875 netdev_info(adapter->netdev, "already closed\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002876 }
Shreyas Bhatewarad9a5f212010-07-19 07:02:13 +00002877 rtnl_unlock();
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002878
2879 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2880}
2881
2882
Bill Pemberton3a4751a2012-12-03 09:24:16 -05002883static int
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002884vmxnet3_probe_device(struct pci_dev *pdev,
2885 const struct pci_device_id *id)
2886{
2887 static const struct net_device_ops vmxnet3_netdev_ops = {
2888 .ndo_open = vmxnet3_open,
2889 .ndo_stop = vmxnet3_close,
2890 .ndo_start_xmit = vmxnet3_xmit_frame,
2891 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2892 .ndo_change_mtu = vmxnet3_change_mtu,
Michał Mirosława0d27302011-04-18 13:31:21 +00002893 .ndo_set_features = vmxnet3_set_features,
stephen hemminger95305f62011-06-08 14:53:57 +00002894 .ndo_get_stats64 = vmxnet3_get_stats64,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002895 .ndo_tx_timeout = vmxnet3_tx_timeout,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00002896 .ndo_set_rx_mode = vmxnet3_set_mc,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002897 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2898 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2899#ifdef CONFIG_NET_POLL_CONTROLLER
2900 .ndo_poll_controller = vmxnet3_netpoll,
2901#endif
2902 };
2903 int err;
 2904	bool dma64 = false; /* quiet a spurious gcc maybe-uninitialized warning */
2905 u32 ver;
2906 struct net_device *netdev;
2907 struct vmxnet3_adapter *adapter;
2908 u8 mac[ETH_ALEN];
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002909 int size;
2910 int num_tx_queues;
2911 int num_rx_queues;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002912
Shreyas Bhatewarae154b632011-05-10 06:13:56 +00002913 if (!pci_msi_enabled())
2914 enable_mq = 0;
2915
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002916#ifdef VMXNET3_RSS
2917 if (enable_mq)
2918 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2919 (int)num_online_cpus());
2920 else
2921#endif
2922 num_rx_queues = 1;
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07002923 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002924
2925 if (enable_mq)
2926 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2927 (int)num_online_cpus());
2928 else
2929 num_tx_queues = 1;
2930
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07002931 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002932 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2933 max(num_tx_queues, num_rx_queues));
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002934 dev_info(&pdev->dev,
2935 "# of Tx queues : %d, # of Rx queues : %d\n",
2936 num_tx_queues, num_rx_queues);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002937
Joe Perches41de8d42012-01-29 13:47:52 +00002938 if (!netdev)
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002939 return -ENOMEM;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002940
2941 pci_set_drvdata(pdev, netdev);
2942 adapter = netdev_priv(netdev);
2943 adapter->netdev = netdev;
2944 adapter->pdev = pdev;
2945
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00002946 spin_lock_init(&adapter->cmd_lock);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002947 adapter->shared = pci_alloc_consistent(adapter->pdev,
stephen hemminger96800ee2012-11-13 13:53:28 +00002948 sizeof(struct Vmxnet3_DriverShared),
2949 &adapter->shared_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002950 if (!adapter->shared) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002951 dev_err(&pdev->dev, "Failed to allocate memory\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002952 err = -ENOMEM;
2953 goto err_alloc_shared;
2954 }
2955
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002956 adapter->num_rx_queues = num_rx_queues;
2957 adapter->num_tx_queues = num_tx_queues;
2958
2959 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2960 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2961 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
stephen hemminger96800ee2012-11-13 13:53:28 +00002962 &adapter->queue_desc_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002963
2964 if (!adapter->tqd_start) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002965 dev_err(&pdev->dev, "Failed to allocate memory\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002966 err = -ENOMEM;
2967 goto err_alloc_queue_desc;
2968 }
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002969 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
stephen hemminger96800ee2012-11-13 13:53:28 +00002970 adapter->num_tx_queues);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002971
2972 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2973 if (adapter->pm_conf == NULL) {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002974 err = -ENOMEM;
2975 goto err_alloc_pm;
2976 }
2977
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002978#ifdef VMXNET3_RSS
2979
2980 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2981 if (adapter->rss_conf == NULL) {
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00002982 err = -ENOMEM;
2983 goto err_alloc_rss;
2984 }
2985#endif /* VMXNET3_RSS */
2986
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002987 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2988 if (err < 0)
2989 goto err_alloc_pci;
2990
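	/*
	 * Revision handshake: the low bit of VRRS (device) and UVRS (UPT)
	 * advertises revision 1; the driver acknowledges the revision it will
	 * use by writing 1 back, and bails out on anything else.
	 */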
2991 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2992 if (ver & 1) {
2993 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2994 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00002995 dev_err(&pdev->dev,
2996 "Incompatible h/w version (0x%x) for adapter\n", ver);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07002997 err = -EBUSY;
2998 goto err_ver;
2999 }
3000
3001 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3002 if (ver & 1) {
3003 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3004 } else {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003005 dev_err(&pdev->dev,
3006 "Incompatible upt version (0x%x) for adapter\n", ver);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003007 err = -EBUSY;
3008 goto err_ver;
3009 }
3010
Shreyas Bhatewarae101e7d2011-07-20 16:01:11 +00003011 SET_NETDEV_DEV(netdev, &pdev->dev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003012 vmxnet3_declare_features(adapter, dma64);
3013
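	/*
	 * With equal Tx and Rx queue counts each Tx queue can "buddy" with an
	 * Rx queue and share its interrupt vector; otherwise every queue gets
	 * its own vector.
	 */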
Stephen Hemminger4db37a72013-01-15 07:28:33 +00003014 if (adapter->num_tx_queues == adapter->num_rx_queues)
3015 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3016 else
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003017 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3018
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003019 vmxnet3_alloc_intr_resources(adapter);
3020
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003021#ifdef VMXNET3_RSS
3022 if (adapter->num_rx_queues > 1 &&
3023 adapter->intr.type == VMXNET3_IT_MSIX) {
3024 adapter->rss = true;
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003025 dev_dbg(&pdev->dev, "RSS is enabled.\n");
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003026 } else {
3027 adapter->rss = false;
3028 }
3029#endif
3030
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003031 vmxnet3_read_mac_addr(adapter, mac);
3032 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3033
3034 netdev->netdev_ops = &vmxnet3_netdev_ops;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003035 vmxnet3_set_ethtool_ops(netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003036 netdev->watchdog_timeo = 5 * HZ;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003037
3038 INIT_WORK(&adapter->work, vmxnet3_reset_work);
Steve Hodgsone3bc4ff2012-08-14 17:13:36 +01003039 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003040
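	/*
	 * With MSI-X each Rx queue polls on its own NAPI context; for MSI and
	 * INTx a single NAPI instance on rx_queue[0] services the device.
	 */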
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003041 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3042 int i;
3043 for (i = 0; i < adapter->num_rx_queues; i++) {
3044 netif_napi_add(adapter->netdev,
3045 &adapter->rx_queue[i].napi,
3046 vmxnet3_poll_rx_only, 64);
3047 }
3048 } else {
3049 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3050 vmxnet3_poll, 64);
3051 }
3052
3053 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3054 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3055
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003056 err = register_netdev(netdev);
3057
3058 if (err) {
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003059 dev_err(&pdev->dev, "Failed to register adapter\n");
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003060 goto err_register;
3061 }
3062
Shreyas Bhatewara4a1745fc2010-07-15 21:51:14 +00003063 vmxnet3_check_link(adapter, false);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003064 return 0;
3065
3066err_register:
3067 vmxnet3_free_intr_resources(adapter);
3068err_ver:
3069 vmxnet3_free_pci_resources(adapter);
3070err_alloc_pci:
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003071#ifdef VMXNET3_RSS
3072 kfree(adapter->rss_conf);
3073err_alloc_rss:
3074#endif
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003075 kfree(adapter->pm_conf);
3076err_alloc_pm:
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003077 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3078 adapter->queue_desc_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003079err_alloc_queue_desc:
3080 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3081 adapter->shared, adapter->shared_pa);
3082err_alloc_shared:
3083 pci_set_drvdata(pdev, NULL);
3084 free_netdev(netdev);
3085 return err;
3086}
3087
3088
Bill Pemberton3a4751a2012-12-03 09:24:16 -05003089static void
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003090vmxnet3_remove_device(struct pci_dev *pdev)
3091{
3092 struct net_device *netdev = pci_get_drvdata(pdev);
3093 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003094 int size = 0;
3095 int num_rx_queues;
3096
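	/*
	 * Recompute num_rx_queues exactly as the probe path did, so that the
	 * queue-descriptor area freed below matches the size that was
	 * originally allocated.
	 */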
3097#ifdef VMXNET3_RSS
3098 if (enable_mq)
3099 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3100 (int)num_online_cpus());
3101 else
3102#endif
3103 num_rx_queues = 1;
Shreyas Bhatewaraeebb02b2011-07-07 00:25:52 -07003104 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003105
Tejun Heo23f333a2010-12-12 16:45:14 +01003106 cancel_work_sync(&adapter->work);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003107
3108 unregister_netdev(netdev);
3109
3110 vmxnet3_free_intr_resources(adapter);
3111 vmxnet3_free_pci_resources(adapter);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003112#ifdef VMXNET3_RSS
3113 kfree(adapter->rss_conf);
3114#endif
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003115 kfree(adapter->pm_conf);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003116
3117 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3118 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3119 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3120 adapter->queue_desc_pa);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003121 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3122 adapter->shared, adapter->shared_pa);
3123 free_netdev(netdev);
3124}
3125
3126
3127#ifdef CONFIG_PM
3128
3129static int
3130vmxnet3_suspend(struct device *device)
3131{
3132 struct pci_dev *pdev = to_pci_dev(device);
3133 struct net_device *netdev = pci_get_drvdata(pdev);
3134 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3135 struct Vmxnet3_PMConf *pmConf;
3136 struct ethhdr *ehdr;
3137 struct arphdr *ahdr;
3138 u8 *arpreq;
3139 struct in_device *in_dev;
3140 struct in_ifaddr *ifa;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003141 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003142 int i = 0;
3143
3144 if (!netif_running(netdev))
3145 return 0;
3146
Shreyas Bhatewara51956cd2011-01-14 14:59:52 +00003147 for (i = 0; i < adapter->num_rx_queues; i++)
3148 napi_disable(&adapter->rx_queue[i].napi);
3149
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003150 vmxnet3_disable_all_intrs(adapter);
3151 vmxnet3_free_irqs(adapter);
3152 vmxnet3_free_intr_resources(adapter);
3153
3154 netif_device_detach(netdev);
Shreyas Bhatewara09c50882010-11-19 10:55:24 +00003155 netif_tx_stop_all_queues(netdev);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003156
3157 /* Create wake-up filters. */
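	/*
	 * Each filter is a byte pattern plus a bitmask; bit N of the mask
	 * selects byte N of the pattern for comparison (hence 0x3F below to
	 * cover the six bytes of the station MAC address).
	 */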
3158 pmConf = adapter->pm_conf;
3159 memset(pmConf, 0, sizeof(*pmConf));
3160
3161 if (adapter->wol & WAKE_UCAST) {
3162 pmConf->filters[i].patternSize = ETH_ALEN;
3163 pmConf->filters[i].maskSize = 1;
3164 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3165 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3166
Harvey Harrison3843e512010-10-21 18:05:32 +00003167 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003168 i++;
3169 }
3170
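	/*
	 * ARP wake-up filter: match only the EtherType (ETH_P_ARP), the ARP
	 * opcode (ARPOP_REQUEST) and the target IP of the primary address, so
	 * an ARP request for this host wakes the device.
	 */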
3171 if (adapter->wol & WAKE_ARP) {
3172 in_dev = in_dev_get(netdev);
3173 if (!in_dev)
3174 goto skip_arp;
3175
3176 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3177 if (!ifa)
3178 goto skip_arp;
3179
3180 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3181 sizeof(struct arphdr) + /* ARP header */
3182 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3183 2 * sizeof(u32); /*2 IPv4 addresses */
3184 pmConf->filters[i].maskSize =
3185 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3186
3187 /* ETH_P_ARP in Ethernet header. */
3188 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3189 ehdr->h_proto = htons(ETH_P_ARP);
3190
3191 /* ARPOP_REQUEST in ARP header. */
3192 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3193 ahdr->ar_op = htons(ARPOP_REQUEST);
3194 arpreq = (u8 *)(ahdr + 1);
3195
3196 /* The Unicast IPv4 address in 'tip' field. */
3197 arpreq += 2 * ETH_ALEN + sizeof(u32);
3198 *(u32 *)arpreq = ifa->ifa_address;
3199
3200 /* The mask for the relevant bits. */
3201 pmConf->filters[i].mask[0] = 0x00;
3202 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3203 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3204 pmConf->filters[i].mask[3] = 0x00;
3205 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3206 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3207 in_dev_put(in_dev);
3208
Harvey Harrison3843e512010-10-21 18:05:32 +00003209 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003210 i++;
3211 }
3212
3213skip_arp:
3214 if (adapter->wol & WAKE_MAGIC)
Harvey Harrison3843e512010-10-21 18:05:32 +00003215 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003216
3217 pmConf->numFilters = i;
3218
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00003219 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3220 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3221 *pmConf));
3222 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3223 pmConf));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003224
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003225 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003226 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3227 VMXNET3_CMD_UPDATE_PMCFG);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003228 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003229
3230 pci_save_state(pdev);
3231 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3232 adapter->wol);
3233 pci_disable_device(pdev);
3234 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3235
3236 return 0;
3237}
3238
3239
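/*
 * Resume: clear the wake-up filters programmed by suspend, restore PCI
 * state, and rebuild the interrupt and NAPI resources that suspend released.
 */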
3240static int
3241vmxnet3_resume(struct device *device)
3242{
Shreyas Bhatewara51956cd2011-01-14 14:59:52 +00003243 int err, i = 0;
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003244 unsigned long flags;
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003245 struct pci_dev *pdev = to_pci_dev(device);
3246 struct net_device *netdev = pci_get_drvdata(pdev);
3247 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3248 struct Vmxnet3_PMConf *pmConf;
3249
3250 if (!netif_running(netdev))
3251 return 0;
3252
3253 /* Destroy wake-up filters. */
3254 pmConf = adapter->pm_conf;
3255 memset(pmConf, 0, sizeof(*pmConf));
3256
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00003257 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3258 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3259 *pmConf));
Harvey Harrison0561cf32010-10-21 18:05:34 +00003260 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
Shreyas Bhatewara115924b2009-11-16 13:41:33 +00003261 pmConf));
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003262
3263 netif_device_attach(netdev);
3264 pci_set_power_state(pdev, PCI_D0);
3265 pci_restore_state(pdev);
3266 err = pci_enable_device_mem(pdev);
3267 if (err != 0)
3268 return err;
3269
3270 pci_enable_wake(pdev, PCI_D0, 0);
3271
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003272 spin_lock_irqsave(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003273 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3274 VMXNET3_CMD_UPDATE_PMCFG);
Shreyas Bhatewara83d0fef2011-01-14 14:59:57 +00003275 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003276 vmxnet3_alloc_intr_resources(adapter);
3277 vmxnet3_request_irqs(adapter);
Shreyas Bhatewara51956cd2011-01-14 14:59:52 +00003278 for (i = 0; i < adapter->num_rx_queues; i++)
3279 napi_enable(&adapter->rx_queue[i].napi);
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003280 vmxnet3_enable_all_intrs(adapter);
3281
3282 return 0;
3283}
3284
Alexey Dobriyan47145212009-12-14 18:00:08 -08003285static const struct dev_pm_ops vmxnet3_pm_ops = {
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003286 .suspend = vmxnet3_suspend,
3287 .resume = vmxnet3_resume,
3288};
3289#endif
3290
3291static struct pci_driver vmxnet3_driver = {
3292 .name = vmxnet3_driver_name,
3293 .id_table = vmxnet3_pciid_table,
3294 .probe = vmxnet3_probe_device,
Bill Pemberton3a4751a2012-12-03 09:24:16 -05003295 .remove = vmxnet3_remove_device,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003296#ifdef CONFIG_PM
3297 .driver.pm = &vmxnet3_pm_ops,
3298#endif
3299};
3300
3301
3302static int __init
3303vmxnet3_init_module(void)
3304{
Stephen Hemminger204a6e62013-01-15 07:28:30 +00003305 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
Shreyas Bhatewarad1a890fa2009-10-13 00:15:51 -07003306 VMXNET3_DRIVER_VERSION_REPORT);
3307 return pci_register_driver(&vmxnet3_driver);
3308}
3309
3310module_init(vmxnet3_init_module);
3311
3312
3313static void
3314vmxnet3_exit_module(void)
3315{
3316 pci_unregister_driver(&vmxnet3_driver);
3317}
3318
3319module_exit(vmxnet3_exit_module);
3320
3321MODULE_AUTHOR("VMware, Inc.");
3322MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3323MODULE_LICENSE("GPL v2");
3324MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);