/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

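/*
 * Editor's sketch (not driver code, hypothetical register value): how the
 * GET_LINK readback above is decoded. Bit 0 carries the link state and the
 * upper 16 bits carry the speed in Mbps.
 */
#if 0
	u32 ret = 0x27100001;		/* example readback */
	u32 speed = ret >> 16;		/* 0x2710 == 10000 Mbps */
	bool link_up = ret & 1;		/* bit 0 set: link is up */
#endif
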
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i,
					adapter->rqd_start[i].status.error);

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the ABI.
 * The general technique used here is: double word bitfields are defined in
 * the opposite order for big endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into bitfields, cpu_to_le32 is used to translate
 * the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}



#endif  /* __BIG_ENDIAN_BITFIELD */
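
/*
 * Editor's sketch (not driver code, hypothetical values): what
 * get_bitfield32() does for a one-bit field at the top of a little-endian
 * double word, e.g. a gen bit at pos 31.
 */
#if 0
	__le32 dword = cpu_to_le32(0x80000000);		/* gen bit set */
	u32 gen = get_bitfield32(&dword, 31, 1);	/* mask, shift -> 1 */
#endif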

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */

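/*
 * Editor's usage sketch (handle_completion is a hypothetical helper): the
 * accessors above let the hot paths read descriptor fields identically on
 * either endianness, e.g. a tx-completion loop polls the gen bit and
 * extracts the tx index:
 */
#if 0
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen)
		handle_completion(VMXNET3_TCD_GET_TXIDX(&gdesc->tcd));
#endif
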
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
				  sizeof(struct Vmxnet3_TxDataDesc),
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0, tq->data_ring.size *
	       sizeof(struct Vmxnet3_TxDataDesc));

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 * are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		BUG_ON(rbi->dma_addr == 0);
		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}


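/*
 * Editor's note on the gen-bit handoff above (sketch, not driver code): a
 * descriptor belongs to the device only once its gen bit matches the ring's
 * current gen, so the descriptor is first written with the stale gen and
 * the gen bit is flipped in last. This keeps the device from consuming a
 * half-initialized descriptor.
 */
#if 0
	gd->rxd.addr = cpu_to_le64(rbi->dma_addr);		/* 1. payload */
	gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
				   | val | rbi->len);		/* 2. stale gen */
	gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
								/* 3. hand off */
#endif
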
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					sizeof(struct Vmxnet3_TxDataDesc));
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llu %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * parse and copy relevant protocol headers:
 * For a tso pkt, relevant headers are L2/3/4 including options
 * For a pkt requesting csum offloading, they are L2/3 and may include L4
 * if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  an error occurred while parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
			   struct vmxnet3_tx_ctx *ctx,
			   struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				if (iph->protocol == IPPROTO_TCP)
					ctx->l4_hdr_size = tcp_hdrlen(skb);
				else if (iph->protocol == IPPROTO_UDP)
					ctx->l4_hdr_size = sizeof(struct udphdr);
				else
					ctx->l4_hdr_size = 0;
			} else {
				/* for simplicity, don't copy L4 headers */
				ctx->l4_hdr_size = 0;
			}
			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
					     skb_headlen(skb));
		}

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
	return 1;

err:
	return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}
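
/*
 * Editor's worked example for txd_estimate() (hypothetical sizes): per the
 * "0 == 2^14" note above, each descriptor covers at most 16384 bytes, so a
 * 20000-byte linear area needs 2 descriptors, plus 1 reserved for a possible
 * header copy into the data ring, plus 1 per 4096-byte page fragment:
 */
#if 0
	count = VMXNET3_TXD_NEEDED(20000) + 1		/* 2 + 1 */
		+ VMXNET3_TXD_NEEDED(4096)		/* + 1 */
		+ VMXNET3_TXD_NEEDED(4096);		/* + 1 == 5 */
#endif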

/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are setup successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				goto hdr_too_big;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					goto hdr_too_big;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto unlock_drop_pkt;
	}

	/* fill tx descs related to addr & len */
	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = vlan_tx_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

hdr_too_big:
	tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


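/*
 * Editor's worked example (hypothetical numbers) for the TSO accounting in
 * vmxnet3_tq_xmit() above: with skb->len = 7354, hlen = 54 and mss = 1460,
 * (7354 - 54 + 1460 - 1) / 1460 = 5, so txNumDeferred grows by one per
 * segment the device will emit. The TXPROD doorbell is only written once
 * txNumDeferred crosses txThreshold, keeping register writes off the
 * per-packet path.
 */
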
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		/* typical case: TCP/UDP over IP and both csums are correct */
		if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
							VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}

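/*
 * Editor's summary of the decision above (how this driver version maps the
 * descriptor bits onto skb->ip_summed):
 *   cnc set, or RXCSUM disabled     -> CHECKSUM_NONE, the stack verifies
 *   TCP/UDP over v4/v6, csums OK    -> CHECKSUM_UNNECESSARY
 *   otherwise, device-supplied csum -> CHECKSUM_PARTIAL with rcd->csum
 */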

static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_rxd = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring *ring = NULL;
		if (num_rxd >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		num_rxd++;
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
		idx = rcd->rxdIdx;
		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       rcd->rqID != rq->qid);

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    rbi->len);
			if (new_skb == NULL) {
				/* Skb allocation failed; do not hand this
				 * skb over to the stack. Reuse it and drop
				 * the existing pkt.
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
					 rbi->len,
					 PCI_DMA_FROMDEVICE);

#ifdef VMXNET3_RSS
			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
			    (adapter->netdev->features & NETIF_F_RXHASH))
				ctx->skb->rxhash = le32_to_cpu(rcd->rssHash);
#endif
			skb_put(ctx->skb, rcd->len);

			/* Immediate refill */
			rbi->skb = new_skb;
			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
						       rbi->skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;

		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			new_page = alloc_page(GFP_ATOMIC);
			if (unlikely(new_page == NULL)) {
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				rq->stats.rx_buf_alloc_failure++;
				dev_kfree_skb(ctx->skb);
				ctx->skb = NULL;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rcd->len) {
				dma_unmap_page(&adapter->pdev->dev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);
			}

			/* Immediate refill */
			rbi->page = new_page;
			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
						     rbi->page,
						     0, PAGE_SIZE,
						     PCI_DMA_FROMDEVICE);
			rxd->addr = cpu_to_le64(rbi->dma_addr);
			rxd->len = rbi->len;
		}


		skb = ctx->skb;
		if (rcd->eop) {
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);

			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
					  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			       struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}

	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			dma_free_coherent(&adapter->pdev->dev,
					  rq->rx_ring[i].size
					  * sizeof(struct Vmxnet3_RxDesc),
					  rq->rx_ring[i].base,
					  rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
	}

	if (rq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
				  * sizeof(struct Vmxnet3_RxCompDesc),
				  rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}

	if (rq->buf_info[0]) {
		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
			    (rq->rx_ring[0].size + rq->rx_ring[1].size);
		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
				  rq->buf_info_pa);
	}

	/*
	 * Clear buf_info only after the shared buf_info area has been freed;
	 * clearing it in the ring loop above would defeat the check above
	 * and leak the allocation.
	 */
	rq->buf_info[0] = NULL;
	rq->buf_info[1] = NULL;
}


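/*
 * Lay out buf_info for ring 0 so that every rx_buf_per_pkt-th slot holds an
 * skb (the packet head) and the slots in between hold page-sized frag
 * buffers; ring 1 holds only page buffers. Then reset both rings and the
 * completion ring and pre-fill the rings with rx buffers.
 */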
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* the 1st ring must have at least one rx buffer */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to initialize rx queue %i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;
}


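/*
 * Allocate the DMA-coherent areas a queue needs: the two rx descriptor
 * rings, the completion ring, and one shared vmxnet3_rx_buf_info array
 * covering both rings (rq->buf_info_pa is later handed to the device as the
 * queue's ddPA in vmxnet3_setup_driver_shared()).
 */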
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = dma_alloc_coherent(
						&adapter->pdev->dev, sz,
						&rq->rx_ring[i].basePA,
						GFP_KERNEL);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
						&rq->comp_ring.basePA,
						GFP_KERNEL);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
				 GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}


static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue %i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}
	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;
}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;

	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


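/*
 * NAPI handler used when all queues share one interrupt (INTx/MSI): each
 * pass completes tx work on every tx queue and spends the budget across all
 * rx queues, re-enabling every interrupt once under budget.
 */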
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}

/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
					struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}


#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}


/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */


/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	if (adapter->intr.type == VMXNET3_IT_INTX) {
		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
		if (unlikely(icr == 0))
			/* not ours */
			return IRQ_NONE;
	}

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	napi_schedule(&adapter->rx_queue[0].napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER

/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(adapter);

	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);
}
#endif	/* CONFIG_NET_POLL_CONTROLLER */

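/*
 * Request one irq per tx queue, then one per rx queue, then one for events;
 * with BUDDYSHARE the rx queues reuse the tx vectors (named "rxtx"), and
 * with TXSHARE all tx queues share vector 0. For MSI and INTx everything
 * funnels through vmxnet3_intr() and the rx queue count is forced to 1.
 */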
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	int err = 0, i;
	int vector = 0;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
					adapter->netdev->name, vector);
				err = request_irq(
					      intr->msix_entries[vector].vector,
					      vmxnet3_msix_tx, 0,
					      adapter->tx_queue[i].name,
					      &adapter->tx_queue[i]);
			} else {
				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			}
			if (err) {
				dev_err(&adapter->netdev->dev,
					"Failed to request irq for MSIX, %s, "
					"error %d\n",
					adapter->tx_queue[i].name, err);
				return err;
			}

			/* Handle the case where only 1 MSIx was allocated for
			 * all tx queues */
			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
				for (; i < adapter->num_tx_queues; i++)
					adapter->tx_queue[i].comp_ring.intr_idx
								= vector;
				vector++;
				break;
			} else {
				adapter->tx_queue[i].comp_ring.intr_idx
								= vector++;
			}
		}
		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
			vector = 0;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
					adapter->netdev->name, vector);
			else
				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
					adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
					  vmxnet3_msix_rx, 0,
					  adapter->rx_queue[i].name,
					  &(adapter->rx_queue[i]));
			if (err) {
				netdev_err(adapter->netdev,
					   "Failed to request irq for MSIX, "
					   "%s, error %d\n",
					   adapter->rx_queue[i].name, err);
				return err;
			}

			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
		}

		sprintf(intr->event_msi_vector_name, "%s-event-%d",
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,
				  intr->event_msi_vector_name, adapter->netdev);
		intr->event_intr_idx = vector;

	} else if (intr->type == VMXNET3_IT_MSI) {
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
				  adapter->netdev->name, adapter->netdev);
	} else {
#endif
		adapter->num_rx_queues = 1;
		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
				  IRQF_SHARED, adapter->netdev->name,
				  adapter->netdev);
#ifdef CONFIG_PCI_MSI
	}
#endif
	intr->num_intrs = vector + 1;
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to request irq (intr type:%d), error %d\n",
			   intr->type, err);
	} else {
		/* Number of rx queues will not change after this */
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
			rq->qid = i;
			rq->qid2 = i + adapter->num_rx_queues;
		}

		/* init our intr settings */
		for (i = 0; i < intr->num_intrs; i++)
			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
		if (adapter->intr.type != VMXNET3_IT_MSIX) {
			adapter->intr.event_intr_idx = 0;
			for (i = 0; i < adapter->num_tx_queues; i++)
				adapter->tx_queue[i].comp_ring.intr_idx = 0;
			adapter->rx_queue[0].comp_ring.intr_idx = 0;
		}

		netdev_info(adapter->netdev,
			    "intr type %u, mode %u, %u vectors allocated\n",
			    intr->type, intr->mask_mode, intr->num_intrs);
	}

	return err;
}


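/* Release irqs in the same order they were requested: tx, rx, then event. */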
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_intr *intr = &adapter->intr;
	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
	case VMXNET3_IT_MSIX:
	{
		int i, vector = 0;

		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
			for (i = 0; i < adapter->num_tx_queues; i++) {
				free_irq(intr->msix_entries[vector++].vector,
					 &(adapter->tx_queue[i]));
				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
					break;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			free_irq(intr->msix_entries[vector++].vector,
				 &(adapter->rx_queue[i]));
		}

		free_irq(intr->msix_entries[vector].vector,
			 adapter->netdev);
		BUG_ON(vector >= intr->num_intrs);
		break;
	}
#endif
	case VMXNET3_IT_MSI:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	case VMXNET3_IT_INTX:
		free_irq(adapter->pdev->irq, adapter->netdev);
		break;
	default:
		BUG();
	}
}


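/*
 * Rewrite the device's VLAN filter table (a bitmap indexed by VLAN id) from
 * the driver's active_vlans set; vid 0 is always set so untagged frames
 * keep flowing.
 */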
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	u16 vid;

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}


static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}


static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	if (!(netdev->flags & IFF_PROMISC)) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		unsigned long flags;

		VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}


static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
	u8 *buf = NULL;
	u32 sz = netdev_mc_count(netdev) * ETH_ALEN;

	/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
	if (sz <= 0xffff) {
		/* We may be called with BH disabled */
		buf = kmalloc(sz, GFP_ATOMIC);
		if (buf) {
			struct netdev_hw_addr *ha;
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev)
				memcpy(buf + i++ * ETH_ALEN, ha->addr,
				       ETH_ALEN);
		}
	}
	return buf;
}


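/*
 * Recompute the rx mode from netdev flags and push it to the device:
 * promiscuous mode clears the VLAN filter table, ALLMULTI bypasses the
 * multicast list, and otherwise the list is copied into a temporary buffer
 * that stays DMA-mapped only while the device reads it.
 */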
static void
vmxnet3_set_mc(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct Vmxnet3_RxFilterConf *rxConf =
					&adapter->shared->devRead.rxFilterConf;
	u8 *new_table = NULL;
	dma_addr_t new_table_pa = 0;
	u32 new_mode = VMXNET3_RXM_UCAST;

	if (netdev->flags & IFF_PROMISC) {
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));

		new_mode |= VMXNET3_RXM_PROMISC;
	} else {
		vmxnet3_restore_vlan(adapter);
	}

	if (netdev->flags & IFF_BROADCAST)
		new_mode |= VMXNET3_RXM_BCAST;

	if (netdev->flags & IFF_ALLMULTI) {
		new_mode |= VMXNET3_RXM_ALL_MULTI;
	} else if (!netdev_mc_empty(netdev)) {
		new_table = vmxnet3_copy_mc(netdev);
		if (new_table) {
			new_mode |= VMXNET3_RXM_MCAST;
			rxConf->mfTableLen = cpu_to_le16(
					netdev_mc_count(netdev) * ETH_ALEN);
			new_table_pa = dma_map_single(&adapter->pdev->dev,
						      new_table,
						      rxConf->mfTableLen,
						      PCI_DMA_TODEVICE);
			rxConf->mfTablePA = cpu_to_le64(new_table_pa);
		} else {
			netdev_info(netdev,
				    "failed to copy mcast list, setting ALL_MULTI\n");
			new_mode |= VMXNET3_RXM_ALL_MULTI;
		}
	}

	if (!(new_mode & VMXNET3_RXM_MCAST)) {
		rxConf->mfTableLen = 0;
		rxConf->mfTablePA = 0;
	}

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	if (new_mode != rxConf->rxMode) {
		rxConf->rxMode = cpu_to_le32(new_mode);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_RX_MODE);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (new_table) {
		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
		kfree(new_table);
	}
}

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}


/*
 * Set up driver_shared based on settings in adapter.
 */

static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_DriverShared *shared = adapter->shared;
	struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
	struct Vmxnet3_TxQueueConf *tqc;
	struct Vmxnet3_RxQueueConf *rqc;
	int i;

	memset(shared, 0, sizeof(*shared));

	/* driver settings */
	shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
	devRead->misc.driverInfo.version = cpu_to_le32(
						VMXNET3_DRIVER_VERSION_NUM);
	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
				*((u32 *)&devRead->misc.driverInfo.gos));
	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);

	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));

	/* set up feature flags */
	if (adapter->netdev->features & NETIF_F_RXCSUM)
		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;

	if (adapter->netdev->features & NETIF_F_LRO) {
		devRead->misc.uptFeatures |= UPT1_F_LRO;
		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
	}
	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;

	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
	devRead->misc.queueDescLen = cpu_to_le32(
		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));

	/* tx queue settings */
	devRead->misc.numTxQueues = adapter->num_tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
		tqc = &adapter->tqd_start[i].conf;
		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
		tqc->ddLen          = cpu_to_le32(
					sizeof(struct vmxnet3_tx_buf_info) *
					tqc->txRingSize);
		tqc->intrIdx        = tq->comp_ring.intr_idx;
	}

	/* rx queue settings */
	devRead->misc.numRxQueues = adapter->num_rx_queues;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		rqc = &adapter->rqd_start[i].conf;
		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
		rqc->ddLen           = cpu_to_le32(
					sizeof(struct vmxnet3_rx_buf_info) *
					(rqc->rxRingSize[0] +
					 rqc->rxRingSize[1]));
		rqc->intrIdx         = rq->comp_ring.intr_idx;
	}

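	/*
	 * RSS, when enabled, uses a Toeplitz hash over IPv4/IPv6 and their
	 * TCP flavors with a fixed default key; the indirection table is
	 * seeded with the standard even spread across the rx queues.
	 */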
#ifdef VMXNET3_RSS
	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));

	if (adapter->rss) {
		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
		static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
			0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
			0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
			0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
			0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
			0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
		};

		devRead->misc.uptFeatures |= UPT1_F_RSS;
		devRead->misc.numRxQueues = adapter->num_rx_queues;
		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
				    UPT1_RSS_HASH_TYPE_IPV4 |
				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
				    UPT1_RSS_HASH_TYPE_IPV6;
		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
		memcpy(rssConf->hashKey, rss_key, sizeof(rss_key));

		for (i = 0; i < rssConf->indTableSize; i++)
			rssConf->indTable[i] = ethtool_rxfh_indir_default(
				i, adapter->num_rx_queues);

		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
		devRead->rssConfDesc.confPA =
			cpu_to_le64(adapter->rss_conf_pa);
	}

#endif /* VMXNET3_RSS */

	/* intr settings */
	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	devRead->intrConf.numIntrs = adapter->intr.num_intrs;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];

	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
	devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);

	/* rx filter settings */
	devRead->rxFilterConf.rxMode = 0;
	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);

	/* the rest are already zeroed */
}


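/*
 * Bring the device up: initialize and fill the rings, request irqs, build
 * the shared area, then issue ACTIVATE_DEV and prime the RXPROD/RXPROD2
 * registers with each ring's fill point so the device starts consuming rx
 * buffers.
 */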
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
	int err, i;
	u32 ret;
	unsigned long flags;

	netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
		" ring sizes %u %u %u\n", adapter->netdev->name,
		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
		adapter->tx_queue[0].tx_ring.size,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to init rx queue error %d\n", err);
		goto rq_err;
	}

	err = vmxnet3_request_irqs(adapter);
	if (err) {
		netdev_err(adapter->netdev,
			   "Failed to setup irq, error %d\n", err);
		goto irq_err;
	}

	vmxnet3_setup_driver_shared(adapter);

	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
			       adapter->shared_pa));
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
			       adapter->shared_pa));
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	if (ret != 0) {
		netdev_err(adapter->netdev,
			   "Failed to activate dev: error %u\n", ret);
		err = -EINVAL;
		goto activate_err;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		VMXNET3_WRITE_BAR0_REG(adapter,
				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
				adapter->rx_queue[i].rx_ring[0].next2fill);
		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
				(i * VMXNET3_REG_ALIGN)),
				adapter->rx_queue[i].rx_ring[1].next2fill);
	}

	/* Apply the rx filter settings last. */
	vmxnet3_set_mc(adapter->netdev);

	/*
	 * Check link state when first activating device. It will start the
	 * tx queue if the link is up.
	 */
	vmxnet3_check_link(adapter, true);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
	return 0;

activate_err:
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
	vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
	/* free up buffers we allocated */
	vmxnet3_rq_cleanup_all(adapter);
	return err;
}


void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}


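/*
 * Quiesce is the inverse of activate: a no-op if the QUIESCED bit is
 * already set, otherwise it tells the device to stop, masks all interrupts,
 * stops NAPI and the tx queues, and tears down ring contents and irqs (the
 * ring allocations themselves are kept).
 */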
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;

	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
		return 0;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_QUIESCE_DEV);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	adapter->link_speed = 0;
	netif_carrier_off(adapter->netdev);

	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	return 0;
}


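/*
 * The device keeps the MAC address in two registers: MACL holds the first
 * four bytes and the low 16 bits of MACH hold the last two.
 */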
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = *(u32 *)mac;
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);

	tmp = (mac[5] << 8) | mac[4];
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}


static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

	return 0;
}


/* ==================== initialization and cleanup routines ============ */

static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
	int err;
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev = adapter->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
		return err;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = true;
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			err = -EIO;
			goto err_set_mask;
		}
		*dma64 = false;
	}

	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
					   vmxnet3_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to request region for adapter: error %d\n", err);
		goto err_set_mask;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr0) {
		dev_err(&pdev->dev, "Failed to map bar0\n");
		err = -EIO;
		goto err_ioremap;
	}

	mmio_start = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);
	adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
	if (!adapter->hw_addr1) {
		dev_err(&pdev->dev, "Failed to map bar1\n");
		err = -EIO;
		goto err_bar1;
	}
	return 0;

err_bar1:
	iounmap(adapter->hw_addr0);
err_ioremap:
	pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
	pci_disable_device(pdev);
	return err;
}


static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
	BUG_ON(!adapter->pdev);

	iounmap(adapter->hw_addr0);
	iounmap(adapter->hw_addr1);
	pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
	pci_disable_device(adapter->pdev);
}


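/*
 * Size the rx rings for the current MTU. A packet that fits in one skb
 * buffer needs a single ring-0 slot; otherwise each packet takes one skb
 * slot plus enough page slots for the remainder. Ring 0 is rounded to a
 * multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN, so the ring always
 * holds a whole number of per-packet buffer groups.
 */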
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
	size_t sz, i, ring0_size, ring1_size, comp_size;
	struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];

	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
				    VMXNET3_MAX_ETH_HDR_SIZE) {
		adapter->skb_buf_size = adapter->netdev->mtu +
					VMXNET3_MAX_ETH_HDR_SIZE;
		if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
			adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;

		adapter->rx_buf_per_pkt = 1;
	} else {
		adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
		sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
					    VMXNET3_MAX_ETH_HDR_SIZE;
		adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	/*
	 * for simplicity, force the ring0 size to be a multiple of
	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
	 */
	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
			   sz * sz);
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rq = &adapter->rx_queue[i];
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
		rq->comp_ring.size = comp_size;
	}
}


int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
		      u32 rx_ring_size, u32 rx_ring2_size)
{
	int err = 0, i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
		tq->tx_ring.size   = tx_ring_size;
		tq->data_ring.size = tx_ring_size;
		tq->comp_ring.size = tx_ring_size;
		tq->shared = &adapter->tqd_start[i].ctrl;
		tq->stopped = true;
		tq->adapter = adapter;
		tq->qid = i;
		err = vmxnet3_tq_create(tq, adapter);
		/*
		 * Too late to change num_tx_queues. We cannot make do with
		 * fewer tx queues than we asked for.
		 */
		if (err)
			goto queue_err;
	}

	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
		/* qid and qid2 for rx queues will be assigned later when num
		 * of rx queues is finalized after allocating intrs */
		rq->shared = &adapter->rqd_start[i].ctrl;
		rq->adapter = adapter;
		err = vmxnet3_rq_create(rq, adapter);
		if (err) {
			if (i == 0) {
				netdev_err(adapter->netdev,
					   "Could not allocate any rx queues. "
					   "Aborting.\n");
				goto queue_err;
			} else {
				netdev_info(adapter->netdev,
					    "Number of rx queues changed "
					    "to: %d.\n", i);
				adapter->num_rx_queues = i;
				err = 0;
				break;
			}
		}
	}
	return err;
queue_err:
	vmxnet3_tq_destroy_all(adapter);
	return err;
}

static int
vmxnet3_open(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter;
	int err, i;

	adapter = netdev_priv(netdev);

	for (i = 0; i < adapter->num_tx_queues; i++)
		spin_lock_init(&adapter->tx_queue[i].tx_lock);

	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err)
		goto queue_err;

	err = vmxnet3_activate_dev(adapter);
	if (err)
		goto activate_err;

	return 0;

activate_err:
	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);
queue_err:
	return err;
}


static int
vmxnet3_close(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	vmxnet3_quiesce_dev(adapter);

	vmxnet3_rq_destroy_all(adapter);
	vmxnet3_tq_destroy_all(adapter);

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);

	return 0;
}


void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
	int i;

	/*
	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
	 * vmxnet3_close() will deadlock.
	 */
	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));

	/* we need to enable NAPI, otherwise dev_close will deadlock */
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	dev_close(adapter->netdev);
}


static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/*
	 * Reset_work may be in the middle of resetting the device, wait for its
	 * completion.
	 */
	while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(netdev)) {
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);

		/* we need to re-create the rx queue based on the new mtu */
		vmxnet3_rq_destroy_all(adapter);
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-create rx queues, "
				   "error %d. Closing it.\n", err);
			goto out;
		}

		err = vmxnet3_activate_dev(adapter);
		if (err) {
			netdev_err(netdev,
				   "failed to re-activate, error %d. "
				   "Closing it\n", err);
			goto out;
		}
	}

out:
	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
	if (err)
		vmxnet3_force_close(adapter);

	return err;
}


static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
	struct net_device *netdev = adapter->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_LRO;
	if (dma64)
		netdev->hw_features |= NETIF_F_HIGHDMA;
	netdev->vlan_features = netdev->hw_features &
				~(NETIF_F_HW_VLAN_CTAG_TX |
				  NETIF_F_HW_VLAN_CTAG_RX);
	netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}


static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
	u32 tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
	*(u32 *)mac = tmp;

	tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
}

#ifdef CONFIG_PCI_MSI

/*
 * Enable MSIx vectors.
 * Returns :
 *	0 on successful enabling of required vectors,
 *	VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
 *	 could be enabled.
 *	number of vectors which can be enabled otherwise (this number is smaller
 *	 than VMXNET3_LINUX_MIN_MSIX_VECT)
 */

static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
			     int vectors)
{
	int err = 0, vector_threshold;
	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;

	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
				      vectors);
		if (!err) {
			adapter->intr.num_intrs = vectors;
			return 0;
		} else if (err < 0) {
			dev_err(&adapter->netdev->dev,
				"Failed to enable MSI-X, error: %d\n", err);
			vectors = 0;
		} else if (err < vector_threshold) {
			break;
		} else {
			/* If we fail to enable the required number of MSI-X
			 * vectors, try enabling the minimum number required.
			 */
			dev_err(&adapter->netdev->dev,
				"Failed to enable %d MSI-X, trying %d instead\n",
				vectors, vector_threshold);
			vectors = vector_threshold;
		}
	}

	dev_info(&adapter->pdev->dev,
		 "Number of MSI-X interrupts which can be allocated "
		 "is lower than min threshold required.\n");
	return err;
}


#endif /* CONFIG_PCI_MSI */

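/*
 * Ask the device which interrupt type and masking mode to use
 * (GET_CONF_INTR returns the type in bits 0-1 and the mask mode in bits
 * 2-3), then fall back from MSI-X to MSI to INTx as allocations fail,
 * shrinking the rx queue count to 1 whenever per-queue vectors are not
 * available.
 */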
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
	u32 cfg;
	unsigned long flags;

	/* intr settings */
	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_CONF_INTR);
	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

	if (adapter->intr.type == VMXNET3_IT_AUTO)
		adapter->intr.type = VMXNET3_IT_MSIX;

#ifdef CONFIG_PCI_MSI
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int vector, err = 0;

		adapter->intr.num_intrs = (adapter->share_intr ==
					   VMXNET3_INTR_TXSHARE) ? 1 :
					   adapter->num_tx_queues;
		adapter->intr.num_intrs += (adapter->share_intr ==
					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
					   adapter->num_rx_queues;
		adapter->intr.num_intrs += 1;		/* for link event */

		adapter->intr.num_intrs = (adapter->intr.num_intrs >
					   VMXNET3_LINUX_MIN_MSIX_VECT
					   ? adapter->intr.num_intrs :
					   VMXNET3_LINUX_MIN_MSIX_VECT);

		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;

		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
		/* If we cannot allocate one MSIx vector per queue
		 * then limit the number of rx queues to 1
		 */
		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				netdev_err(adapter->netdev,
					   "Number of rx queues: 1\n");
				adapter->num_rx_queues = 1;
				adapter->intr.num_intrs =
						VMXNET3_LINUX_MIN_MSIX_VECT;
			}
			return;
		}
		if (!err)
			return;

		/* If we cannot allocate MSIx vectors use only one rx queue */
		dev_info(&adapter->pdev->dev,
			 "Failed to enable MSI-X, error %d. "
			 "Limiting #rx queues to 1, try MSI.\n", err);

		adapter->intr.type = VMXNET3_IT_MSI;
	}

	if (adapter->intr.type == VMXNET3_IT_MSI) {
		int err;
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->num_rx_queues = 1;
			adapter->intr.num_intrs = 1;
			return;
		}
	}
#endif /* CONFIG_PCI_MSI */

	adapter->num_rx_queues = 1;
	dev_info(&adapter->netdev->dev,
		 "Using INTx interrupt, #Rx queues: 1.\n");
	adapter->intr.type = VMXNET3_IT_INTX;

	/* INT-X related setting */
	adapter->intr.num_intrs = 1;
}


static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
	if (adapter->intr.type == VMXNET3_IT_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->intr.type == VMXNET3_IT_MSI)
		pci_disable_msi(adapter->pdev);
	else
		BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}


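/*
 * Transmit watchdog callback, invoked by the networking core when a tx
 * queue has been stalled for longer than netdev->watchdog_timeo.  It
 * counts the hang, schedules the reset worker, and wakes the queue so
 * pending frames can be retried once the device has been reset.
 */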
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;

	netdev_err(adapter->netdev, "tx hang\n");
	schedule_work(&adapter->work);
	netif_wake_queue(adapter->netdev);
}


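/*
 * Deferred device reset, run from the adapter's work item.  The
 * VMXNET3_STATE_BIT_RESETTING bit serializes concurrent resets, and the
 * RTNL lock guards against the device being opened or closed while the
 * quiesce/reset/activate sequence runs.
 */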
static void
vmxnet3_reset_work(struct work_struct *data)
{
	struct vmxnet3_adapter *adapter;

	adapter = container_of(data, struct vmxnet3_adapter, work);

	/* if another thread is resetting the device, no need to proceed */
	if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
		return;

	/* if the device is closed, we must leave it alone */
	rtnl_lock();
	if (netif_running(adapter->netdev)) {
		netdev_notice(adapter->netdev, "resetting\n");
		vmxnet3_quiesce_dev(adapter);
		vmxnet3_reset_dev(adapter);
		vmxnet3_activate_dev(adapter);
	} else {
		netdev_info(adapter->netdev, "already closed\n");
	}
	rtnl_unlock();

	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}


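/*
 * PCI probe: called once for each vmxnet3 device found.  The sequence is
 *	1. size the tx/rx queue counts (multi-queue only when MSI is
 *	   available; counts are rounded down to a power of two),
 *	2. allocate the netdev and the DMA-coherent areas shared with the
 *	   device (driver-shared region, queue descriptors, PM and RSS
 *	   configuration),
 *	3. map PCI resources and perform the version handshake,
 *	4. set up interrupts, NAPI contexts, and register the netdev.
 * Each failure unwinds through the err_* labels in reverse order.
 */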
static int
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
{
	static const struct net_device_ops vmxnet3_netdev_ops = {
		.ndo_open = vmxnet3_open,
		.ndo_stop = vmxnet3_close,
		.ndo_start_xmit = vmxnet3_xmit_frame,
		.ndo_set_mac_address = vmxnet3_set_mac_addr,
		.ndo_change_mtu = vmxnet3_change_mtu,
		.ndo_set_features = vmxnet3_set_features,
		.ndo_get_stats64 = vmxnet3_get_stats64,
		.ndo_tx_timeout = vmxnet3_tx_timeout,
		.ndo_set_rx_mode = vmxnet3_set_mc,
		.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
		.ndo_poll_controller = vmxnet3_netpoll,
#endif
	};
	int err;
	bool dma64 = false; /* quiets a spurious gcc "may be used
			     * uninitialized" warning; it is really set by
			     * vmxnet3_alloc_pci_resources() */
	u32 ver;
	struct net_device *netdev;
	struct vmxnet3_adapter *adapter;
	u8 mac[ETH_ALEN];
	int size;
	int num_tx_queues;
	int num_rx_queues;

	if (!pci_msi_enabled())
		enable_mq = 0;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	if (enable_mq)
		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
				    (int)num_online_cpus());
	else
		num_tx_queues = 1;

	num_tx_queues = rounddown_pow_of_two(num_tx_queues);
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	dev_info(&pdev->dev,
		 "# of Tx queues : %d, # of Rx queues : %d\n",
		 num_tx_queues, num_rx_queues);

	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	spin_lock_init(&adapter->cmd_lock);
	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
					     sizeof(struct vmxnet3_adapter),
					     PCI_DMA_TODEVICE);
	adapter->shared = dma_alloc_coherent(
				&adapter->pdev->dev,
				sizeof(struct Vmxnet3_DriverShared),
				&adapter->shared_pa, GFP_KERNEL);
	if (!adapter->shared) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_shared;
	}

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;
	adapter->rx_buf_per_pkt = 1;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
						&adapter->queue_desc_pa,
						GFP_KERNEL);

	if (!adapter->tqd_start) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		err = -ENOMEM;
		goto err_alloc_queue_desc;
	}
	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
							    adapter->num_tx_queues);

	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
					      sizeof(struct Vmxnet3_PMConf),
					      &adapter->pm_conf_pa,
					      GFP_KERNEL);
	if (adapter->pm_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_pm;
	}

#ifdef VMXNET3_RSS

	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
					       sizeof(struct UPT1_RSSConf),
					       &adapter->rss_conf_pa,
					       GFP_KERNEL);
	if (adapter->rss_conf == NULL) {
		err = -ENOMEM;
		goto err_alloc_rss;
	}
#endif /* VMXNET3_RSS */

	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
	if (err < 0)
		goto err_alloc_pci;

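	/*
	 * Version handshake: VRRS reports the device revisions the
	 * emulation supports and UVRS the supported UPT (uniform
	 * pass-thru) versions, bit 0 denoting revision 1.  The driver
	 * requires revision 1 of both and writes the chosen revision
	 * back to select it.
	 */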
	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible h/w version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
	if (ver & 1) {
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
	} else {
		dev_err(&pdev->dev,
			"Incompatible upt version (0x%x) for adapter\n", ver);
		err = -EBUSY;
		goto err_ver;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	vmxnet3_declare_features(adapter, dma64);

	if (adapter->num_tx_queues == adapter->num_rx_queues)
		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
	else
		adapter->share_intr = VMXNET3_INTR_DONTSHARE;

	vmxnet3_alloc_intr_resources(adapter);

#ifdef VMXNET3_RSS
	if (adapter->num_rx_queues > 1 &&
	    adapter->intr.type == VMXNET3_IT_MSIX) {
		adapter->rss = true;
		netdev->hw_features |= NETIF_F_RXHASH;
		netdev->features |= NETIF_F_RXHASH;
		dev_dbg(&pdev->dev, "RSS is enabled.\n");
	} else {
		adapter->rss = false;
	}
#endif

	vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	netdev->netdev_ops = &vmxnet3_netdev_ops;
	vmxnet3_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	INIT_WORK(&adapter->work, vmxnet3_reset_work);
	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);

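	/*
	 * With per-queue MSI-X vectors each rx queue gets its own NAPI
	 * context polling only that queue; otherwise a single NAPI
	 * context services every queue through vmxnet3_poll.
	 */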
	if (adapter->intr.type == VMXNET3_IT_MSIX) {
		int i;

		for (i = 0; i < adapter->num_rx_queues; i++) {
			netif_napi_add(adapter->netdev,
				       &adapter->rx_queue[i].napi,
				       vmxnet3_poll_rx_only, 64);
		}
	} else {
		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
			       vmxnet3_poll, 64);
	}

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);

	netif_carrier_off(netdev);
	err = register_netdev(netdev);

	if (err) {
		dev_err(&pdev->dev, "Failed to register adapter\n");
		goto err_register;
	}

	vmxnet3_check_link(adapter, false);
	return 0;

err_register:
	vmxnet3_free_intr_resources(adapter);
err_ver:
	vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
err_alloc_queue_desc:
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
err_alloc_shared:
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}


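/*
 * PCI remove: tear down in the reverse order of probe.  num_rx_queues is
 * recomputed exactly as probe computed it so that the queue descriptor
 * area is freed with the same size it was allocated with.
 */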
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	int size = 0;
	int num_rx_queues;

#ifdef VMXNET3_RSS
	if (enable_mq)
		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
				    (int)num_online_cpus());
	else
#endif
		num_rx_queues = 1;
	num_rx_queues = rounddown_pow_of_two(num_rx_queues);

	cancel_work_sync(&adapter->work);

	unregister_netdev(netdev);

	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
			  adapter->rss_conf, adapter->rss_conf_pa);
#endif
	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
			  adapter->pm_conf, adapter->pm_conf_pa);

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
			  adapter->queue_desc_pa);
	dma_free_coherent(&adapter->pdev->dev,
			  sizeof(struct Vmxnet3_DriverShared),
			  adapter->shared, adapter->shared_pa);
	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
	free_netdev(netdev);
}


#ifdef CONFIG_PM

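/*
 * Suspend: quiesce NAPI and interrupts, then program the device's
 * wake-up filters according to adapter->wol before entering D3.  Unicast
 * wake-ups use a pattern filter on the interface MAC address, ARP
 * wake-ups match ARP requests for the interface's primary IPv4 address,
 * and magic-packet wake-ups are handled by the device directly.
 */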
static int
vmxnet3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;
	struct ethhdr *ehdr;
	struct arphdr *ahdr;
	u8 *arpreq;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;
	int i = 0;

	if (!netif_running(netdev))
		return 0;

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_queue[i].napi);

	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);

	netif_device_detach(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Create wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

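	/*
	 * Each pattern filter compares up to patternSize bytes of an
	 * incoming frame; bit n of mask[n / 8] enables the comparison of
	 * pattern byte n.  Hence mask[0] = 0x3F below selects the first
	 * six bytes (the destination MAC), and the ARP filter's mask bits
	 * pick out the EtherType, ARP opcode, and target IP fields.
	 */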
	if (adapter->wol & WAKE_UCAST) {
		pmConf->filters[i].patternSize = ETH_ALEN;
		pmConf->filters[i].maskSize = 1;
		memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
		pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

	if (adapter->wol & WAKE_ARP) {
		in_dev = in_dev_get(netdev);
		if (!in_dev)
			goto skip_arp;

		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		if (!ifa)
			goto skip_arp;

		pmConf->filters[i].patternSize = ETH_HLEN +	/* Ethernet header */
			sizeof(struct arphdr) +		/* ARP header */
			2 * ETH_ALEN +			/* 2 Ethernet addresses */
			2 * sizeof(u32);		/* 2 IPv4 addresses */
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;

		/* ETH_P_ARP in Ethernet header. */
		ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
		ehdr->h_proto = htons(ETH_P_ARP);

		/* ARPOP_REQUEST in ARP header. */
		ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
		ahdr->ar_op = htons(ARPOP_REQUEST);
		arpreq = (u8 *)(ahdr + 1);

		/* The Unicast IPv4 address in 'tip' field. */
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		/* The mask for the relevant bits. */
		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
		pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
		in_dev_put(in_dev);

		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
		i++;
	}

skip_arp:
	if (adapter->wol & WAKE_MAGIC)
		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;

	pmConf->numFilters = i;

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
			adapter->wol);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));

	return 0;
}


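/*
 * Resume: clear the wake-up filters programmed by suspend, bring the PCI
 * device back to D0, and rebuild the interrupt and NAPI state that
 * suspend tore down.
 */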
static int
vmxnet3_resume(struct device *device)
{
	int err, i = 0;
	unsigned long flags;
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
	struct Vmxnet3_PMConf *pmConf;

	if (!netif_running(netdev))
		return 0;

	/* Destroy wake-up filters. */
	pmConf = adapter->pm_conf;
	memset(pmConf, 0, sizeof(*pmConf));

	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
								  *pmConf));
	adapter->shared->devRead.pmConfDesc.confPA =
		cpu_to_le64(adapter->pm_conf_pa);

	netif_device_attach(netdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device_mem(pdev);
	if (err != 0)
		return err;

	pci_enable_wake(pdev, PCI_D0, 0);

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_PMCFG);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

	return 0;
}

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.suspend = vmxnet3_suspend,
	.resume = vmxnet3_resume,
};
#endif

static struct pci_driver vmxnet3_driver = {
	.name		= vmxnet3_driver_name,
	.id_table	= vmxnet3_pciid_table,
	.probe		= vmxnet3_probe_device,
	.remove		= vmxnet3_remove_device,
#ifdef CONFIG_PM
	.driver.pm	= &vmxnet3_pm_ops,
#endif
};


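/*
 * Module init/exit: register the driver with the PCI core.  The core
 * then calls vmxnet3_probe_device() for every matching device, so no
 * device setup happens here beyond printing the driver banner.
 */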
static int __init
vmxnet3_init_module(void)
{
	pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
		VMXNET3_DRIVER_VERSION_REPORT);
	return pci_register_driver(&vmxnet3_driver);
}

module_init(vmxnet3_init_module);


static void
vmxnet3_exit_module(void)
{
	pci_unregister_driver(&vmxnet3_driver);
}

module_exit(vmxnet3_exit_module);


MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);