/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
	[board_X550_vf]  = &ixgbevf_X550_vf_info,
	[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
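/* the msg_enable mask is expected to be seeded at probe time via
 * netif_msg_init(debug, DEFAULT_MSG_ENABLE), which happens outside this
 * excerpt; a negative debug value selects the default mask
 */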

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
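		/* each VTIVAR register carries four 8-bit entries covering
		 * two queues: Rx in the low byte and Tx in the high byte of
		 * each 16-bit half, so the field offset below follows from
		 * the queue parity and the cause direction
		 */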
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
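	/* i is biased by the ring size so it runs from -count up to zero;
	 * the wrap checks below then reduce to a simple !i test
	 */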
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	napi_gro_receive(&q_vector->napi, skb);
}

/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 */
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 */
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it bumps next to clean
 * and returns true indicating that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}

static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}

/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being adjusted
 *
 * This function is an ixgbevf specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 */
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
			      struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		ixgbevf_pull_tail(rx_ring, skb);

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

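/* A page is treated as reserved when it comes from the pfmemalloc emergency
 * reserves or sits on a remote NUMA node; recycling such a page into the Rx
 * ring would either defeat the purpose of the reserve or keep paying
 * cross-node access costs, so it is released back to the system instead.
 */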
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
}

/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 */
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

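	/* Buffer recycling: with 4K pages each page is split into two
	 * IXGBEVF_RX_BUFSZ halves and page_offset is XOR-flipped between
	 * them, so one half can be handed to the stack while hardware
	 * writes the other.  With larger pages the offset simply walks
	 * forward by truesize until the page is exhausted.
	 */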
	if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;

#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;

#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}

static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all the Tx and Rx rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
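		/* the result is a 9:1 weighted harmonic mean of the old and
		 * new values; since EITR encodes an interval, this amounts
		 * to a moving average of the interrupt rate that gives only
		 * 10% weight to the newest sample
		 */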
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
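	/* each pass recomputes ceil(remaining queues / vectors left), so
	 * earlier vectors absorb the remainder and the split stays as even
	 * as possible
	 */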
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1402{
Greg Rose92915f72010-01-09 02:24:10 +00001403 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001404 int i;
Greg Rose92915f72010-01-09 02:24:10 +00001405
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001406 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001407 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001408 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
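	/* Editor's gloss: writing ~0 to VTEIMC masks every vector, while
	 * zeroing VTEIAM and VTEIAC disables the auto-mask and auto-clear
	 * behaviour so nothing re-arms the interrupts behind our back.
	 */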
Greg Rose92915f72010-01-09 02:24:10 +00001409
1410 IXGBE_WRITE_FLUSH(hw);
1411
1412 for (i = 0; i < adapter->num_msix_vectors; i++)
1413 synchronize_irq(adapter->msix_entries[i].vector);
1414}
1415
1416/**
1417 * ixgbevf_irq_enable - Enable default interrupt generation settings
1418 * @adapter: board private structure
1419 **/
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001420static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001421{
1422 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001423
Alexander Duyck5f3600e2012-05-11 08:32:55 +00001424 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1425 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1426 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
Greg Rose92915f72010-01-09 02:24:10 +00001427}
1428
1429/**
Don Skidmorede02dec2014-01-16 02:30:09 -08001430 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1431 * @adapter: board private structure
1432 * @ring: structure containing ring specific data
1433 *
1434 * Configure the Tx descriptor ring after a reset.
1435 **/
1436static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1437 struct ixgbevf_ring *ring)
1438{
1439 struct ixgbe_hw *hw = &adapter->hw;
1440 u64 tdba = ring->dma;
1441 int wait_loop = 10;
1442 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1443 u8 reg_idx = ring->reg_idx;
1444
1445 /* disable queue to avoid issues while updating state */
1446 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1447 IXGBE_WRITE_FLUSH(hw);
1448
1449 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1450 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1451 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1452 ring->count * sizeof(union ixgbe_adv_tx_desc));
1453
1454 /* disable head writeback */
1455 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1456 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1457
1458 /* enable relaxed ordering */
1459 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1460 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1461 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1462
1463 /* reset head and tail pointers */
1464 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1465 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
Mark Rustaddbf8b0d2014-03-04 03:02:34 +00001466 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001467
1468 /* reset ntu and ntc to place SW in sync with hardware */
1469 ring->next_to_clean = 0;
1470 ring->next_to_use = 0;
1471
1472 /* To avoid issues, WTHRESH + PTHRESH should always be less than
1473 * or equal to the number of on-chip descriptors, which is
1474 * currently 40.
1475 */
1476 txdctl |= (8 << 16); /* WTHRESH = 8 */
1477
1478 /* Setting PTHRESH to 32 improves performance */
1479 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1480 32; /* PTHRESH = 32 */
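	/* Illustrative arithmetic: combined with IXGBE_TXDCTL_ENABLE this
	 * gives txdctl = ENABLE | (8 << 16) | (1 << 8) | 32, filling the
	 * WTHRESH, HTHRESH and PTHRESH fields at bits 22:16, 14:8 and 6:0.
	 */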
1481
1482 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1483
1484 /* poll to verify queue is enabled */
1485 do {
1486 usleep_range(1000, 2000);
1487 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1488 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1489 if (!wait_loop)
1490 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1491}
1492
1493/**
Greg Rose92915f72010-01-09 02:24:10 +00001494 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1495 * @adapter: board private structure
1496 *
1497 * Configure the Tx unit of the MAC after a reset.
1498 **/
1499static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1500{
Don Skidmorede02dec2014-01-16 02:30:09 -08001501 u32 i;
Greg Rose92915f72010-01-09 02:24:10 +00001502
1503 /* Setup the HW Tx Head and Tail descriptor pointers */
Don Skidmorede02dec2014-01-16 02:30:09 -08001504 for (i = 0; i < adapter->num_tx_queues; i++)
1505 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00001506}
1507
1508#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1509
1510static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1511{
Greg Rose92915f72010-01-09 02:24:10 +00001512 struct ixgbe_hw *hw = &adapter->hw;
1513 u32 srrctl;
1514
Greg Rose92915f72010-01-09 02:24:10 +00001515 srrctl = IXGBE_SRRCTL_DROP_EN;
1516
Emil Tantilovbad17232014-11-21 02:57:15 +00001517 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1518 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Alexander Duyck77d5dfc2012-05-11 08:32:19 +00001519 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
Greg Rose92915f72010-01-09 02:24:10 +00001520
Greg Rose92915f72010-01-09 02:24:10 +00001521 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1522}
1523
Don Skidmore1bb9c632013-09-21 01:57:33 +00001524static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1525{
1526 struct ixgbe_hw *hw = &adapter->hw;
1527
1528 /* PSRTYPE must be initialized in 82599 */
1529 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1530 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1531 IXGBE_PSRTYPE_L2HDR;
1532
1533 if (adapter->num_rx_queues > 1)
1534 psrtype |= 1 << 29;
1535
1536 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1537}
1538
Don Skidmorede02dec2014-01-16 02:30:09 -08001539#define IXGBEVF_MAX_RX_DESC_POLL 10
1540static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1541 struct ixgbevf_ring *ring)
1542{
1543 struct ixgbe_hw *hw = &adapter->hw;
1544 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1545 u32 rxdctl;
1546 u8 reg_idx = ring->reg_idx;
1547
Mark Rustad26597802014-03-04 03:02:45 +00001548 if (IXGBE_REMOVED(hw->hw_addr))
1549 return;
Don Skidmorede02dec2014-01-16 02:30:09 -08001550 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1551 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1552
1553 /* write value back with RXDCTL.ENABLE bit cleared */
1554 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1555
1556 /* the hardware may take up to 100us to really disable the rx queue */
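	/* hence ten polls spaced 10us apart below cover that worst case */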
1557 do {
1558 udelay(10);
1559 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1560 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1561
1562 if (!wait_loop)
1563 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1564 reg_idx);
1565}
1566
1567static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1568 struct ixgbevf_ring *ring)
1569{
1570 struct ixgbe_hw *hw = &adapter->hw;
1571 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1572 u32 rxdctl;
1573 u8 reg_idx = ring->reg_idx;
1574
Mark Rustad26597802014-03-04 03:02:45 +00001575 if (IXGBE_REMOVED(hw->hw_addr))
1576 return;
Don Skidmorede02dec2014-01-16 02:30:09 -08001577 do {
1578 usleep_range(1000, 2000);
1579 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1580 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1581
1582 if (!wait_loop)
1583 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1584 reg_idx);
1585}
1586
Emil Tantilov9295edb2014-12-06 09:19:09 +00001587static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1588{
1589 struct ixgbe_hw *hw = &adapter->hw;
1590 u32 vfmrqc = 0, vfreta = 0;
1591 u32 rss_key[10];
1592 u16 rss_i = adapter->num_rx_queues;
1593 int i, j;
1594
1595 /* Fill out hash function seeds */
1596 netdev_rss_key_fill(rss_key, sizeof(rss_key));
1597 for (i = 0; i < 10; i++)
1598 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1599
1600 /* Fill out redirection table */
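	/* Each 32-bit VFRETA register packs four 8-bit entries; j cycles
	 * through the Rx queues, so with rss_i == 2 the table reads
	 * 0, 1, 0, 1, ... and a register is written on every fourth entry.
	 */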
1601 for (i = 0, j = 0; i < 64; i++, j++) {
1602 if (j == rss_i)
1603 j = 0;
1604 vfreta = (vfreta << 8) | (j * 0x1);
1605 if ((i & 3) == 3)
1606 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1607 }
1608
1609 /* Perform hash on these packet types */
1610 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1611 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1612 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1613 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1614
1615 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1616
1617 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1618}
1619
Don Skidmorede02dec2014-01-16 02:30:09 -08001620static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1621 struct ixgbevf_ring *ring)
1622{
1623 struct ixgbe_hw *hw = &adapter->hw;
1624 u64 rdba = ring->dma;
1625 u32 rxdctl;
1626 u8 reg_idx = ring->reg_idx;
1627
1628 /* disable queue to avoid issues while updating state */
1629 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1630 ixgbevf_disable_rx_queue(adapter, ring);
1631
1632 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1633 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1634 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1635 ring->count * sizeof(union ixgbe_adv_rx_desc));
1636
1637 /* enable relaxed ordering */
1638 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1639 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1640
1641 /* reset head and tail pointers */
1642 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1643 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
Mark Rustaddbf8b0d2014-03-04 03:02:34 +00001644 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
Don Skidmorede02dec2014-01-16 02:30:09 -08001645
1646 /* reset ntu and ntc to place SW in sync with hardware */
1647 ring->next_to_clean = 0;
1648 ring->next_to_use = 0;
Emil Tantilovbad17232014-11-21 02:57:15 +00001649 ring->next_to_alloc = 0;
Don Skidmorede02dec2014-01-16 02:30:09 -08001650
1651 ixgbevf_configure_srrctl(adapter, reg_idx);
1652
Emil Tantilovbad17232014-11-21 02:57:15 +00001653 /* allow any size packet since we can handle overflow */
1654 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1655
Don Skidmorede02dec2014-01-16 02:30:09 -08001656 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1657 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1658
1659 ixgbevf_rx_desc_queue_enable(adapter, ring);
Emil Tantilov095e2612014-01-17 18:30:00 -08001660 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
Don Skidmorede02dec2014-01-16 02:30:09 -08001661}
1662
Greg Rose92915f72010-01-09 02:24:10 +00001663/**
1664 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1665 * @adapter: board private structure
1666 *
1667 * Configure the Rx unit of the MAC after a reset.
1668 **/
1669static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1670{
Don Skidmorede02dec2014-01-16 02:30:09 -08001671 int i;
Emil Tantilovbad17232014-11-21 02:57:15 +00001672 struct ixgbe_hw *hw = &adapter->hw;
1673 struct net_device *netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00001674
Don Skidmore1bb9c632013-09-21 01:57:33 +00001675 ixgbevf_setup_psrtype(adapter);
Emil Tantilov9295edb2014-12-06 09:19:09 +00001676 if (hw->mac.type >= ixgbe_mac_X550_vf)
1677 ixgbevf_setup_vfmrqc(adapter);
Alexander Duyckdd1fe112012-07-20 08:09:48 +00001678
Emil Tantilovbad17232014-11-21 02:57:15 +00001679 /* notify the PF of our intent to use this size of frame */
1680 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
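	/* e.g. a standard 1500-byte MTU requests 1500 + 14 (ETH_HLEN)
	 * + 4 (ETH_FCS_LEN) = 1518 bytes, the classic maximum frame size */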
Greg Rose92915f72010-01-09 02:24:10 +00001681
Greg Rose92915f72010-01-09 02:24:10 +00001682 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1683 * the Base and Length of the Rx Descriptor Ring */
Don Skidmorede02dec2014-01-16 02:30:09 -08001684 for (i = 0; i < adapter->num_rx_queues; i++)
1685 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00001686}
1687
Patrick McHardy80d5c362013-04-19 02:04:28 +00001688static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1689 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001690{
1691 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1692 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001693 int err;
1694
John Fastabend55fdd45b2012-10-01 14:52:20 +00001695 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001696
Greg Rose92915f72010-01-09 02:24:10 +00001697 /* add VID to filter table */
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001698 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001699
John Fastabend55fdd45b2012-10-01 14:52:20 +00001700 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001701
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001702 /* translate error return types so error makes sense */
1703 if (err == IXGBE_ERR_MBX)
1704 return -EIO;
1705
1706 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1707 return -EACCES;
1708
Jiri Pirkodadcd652011-07-21 03:25:09 +00001709 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001710
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001711 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001712}
1713
Patrick McHardy80d5c362013-04-19 02:04:28 +00001714static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1715 __be16 proto, u16 vid)
Greg Rose92915f72010-01-09 02:24:10 +00001716{
1717 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1718 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001719 int err = -EOPNOTSUPP;
Greg Rose92915f72010-01-09 02:24:10 +00001720
John Fastabend55fdd45b2012-10-01 14:52:20 +00001721 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001722
Greg Rose92915f72010-01-09 02:24:10 +00001723 /* remove VID from filter table */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001724 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001725
John Fastabend55fdd45b2012-10-01 14:52:20 +00001726 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001727
Jiri Pirkodadcd652011-07-21 03:25:09 +00001728 clear_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05001729
Alexander Duyck2ddc7fe2012-08-21 00:15:13 +00001730 return err;
Greg Rose92915f72010-01-09 02:24:10 +00001731}
1732
1733static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1734{
Jiri Pirkodadcd652011-07-21 03:25:09 +00001735 u16 vid;
Greg Rose92915f72010-01-09 02:24:10 +00001736
Jiri Pirkodadcd652011-07-21 03:25:09 +00001737 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00001738 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1739 htons(ETH_P_8021Q), vid);
Greg Rose92915f72010-01-09 02:24:10 +00001740}
1741
Greg Rose46ec20f2011-05-13 01:33:42 +00001742static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1743{
1744 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1745 struct ixgbe_hw *hw = &adapter->hw;
1746 int count = 0;
1747
1748 if ((netdev_uc_count(netdev)) > 10) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00001749 pr_err("Too many unicast filters - No Space\n");
Greg Rose46ec20f2011-05-13 01:33:42 +00001750 return -ENOSPC;
1751 }
1752
1753 if (!netdev_uc_empty(netdev)) {
1754 struct netdev_hw_addr *ha;
1755 netdev_for_each_uc_addr(ha, netdev) {
1756 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1757 udelay(200);
1758 }
1759 } else {
1760 /*
1761 * If the list is empty then send message to PF driver to
1762 * clear all macvlans on this VF.
1763 */
1764 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1765 }
1766
1767 return count;
1768}
1769
Greg Rose92915f72010-01-09 02:24:10 +00001770/**
Greg Rosedee847f2012-11-02 05:50:57 +00001771 * ixgbevf_set_rx_mode - Multicast and unicast set
Greg Rose92915f72010-01-09 02:24:10 +00001772 * @netdev: network interface device structure
1773 *
1774 * The set_rx_mode entry point is called whenever the multicast address
Greg Rosedee847f2012-11-02 05:50:57 +00001775 * list, unicast address list or the network interface flags are updated.
1776 * This routine is responsible for configuring the hardware for proper
1777 * multicast mode and configuring requested unicast filters.
Greg Rose92915f72010-01-09 02:24:10 +00001778 **/
1779static void ixgbevf_set_rx_mode(struct net_device *netdev)
1780{
1781 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1782 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001783
John Fastabend55fdd45b2012-10-01 14:52:20 +00001784 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001785
Greg Rose92915f72010-01-09 02:24:10 +00001786 /* reprogram multicast list */
Greg Rose92fe0bf2012-11-02 05:50:47 +00001787 hw->mac.ops.update_mc_addr_list(hw, netdev);
Greg Rose46ec20f2011-05-13 01:33:42 +00001788
1789 ixgbevf_write_uc_addr_list(netdev);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001790
John Fastabend55fdd45b2012-10-01 14:52:20 +00001791 spin_unlock_bh(&adapter->mbx_lock);
Greg Rose92915f72010-01-09 02:24:10 +00001792}
1793
1794static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1795{
1796 int q_idx;
1797 struct ixgbevf_q_vector *q_vector;
1798 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1799
1800 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Greg Rose92915f72010-01-09 02:24:10 +00001801 q_vector = adapter->q_vector[q_idx];
Jacob Kellerc777cdf2013-09-21 06:24:20 +00001802#ifdef CONFIG_NET_RX_BUSY_POLL
1803 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1804#endif
Alexander Duyckfa71ae22012-05-11 08:32:50 +00001805 napi_enable(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00001806 }
1807}
1808
1809static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1810{
1811 int q_idx;
1812 struct ixgbevf_q_vector *q_vector;
1813 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1814
1815 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1816 q_vector = adapter->q_vector[q_idx];
Greg Rose92915f72010-01-09 02:24:10 +00001817 napi_disable(&q_vector->napi);
Jacob Kellerc777cdf2013-09-21 06:24:20 +00001818#ifdef CONFIG_NET_RX_BUSY_POLL
1819 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1820 pr_info("QV %d locked\n", q_idx);
1821 usleep_range(1000, 20000);
1822 }
1823#endif /* CONFIG_NET_RX_BUSY_POLL */
Greg Rose92915f72010-01-09 02:24:10 +00001824 }
1825}
1826
Don Skidmore220fe052013-09-21 01:40:49 +00001827static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1828{
1829 struct ixgbe_hw *hw = &adapter->hw;
1830 unsigned int def_q = 0;
1831 unsigned int num_tcs = 0;
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001832 unsigned int num_rx_queues = adapter->num_rx_queues;
1833 unsigned int num_tx_queues = adapter->num_tx_queues;
Don Skidmore220fe052013-09-21 01:40:49 +00001834 int err;
1835
1836 spin_lock_bh(&adapter->mbx_lock);
1837
1838 /* fetch queue configuration from the PF */
1839 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1840
1841 spin_unlock_bh(&adapter->mbx_lock);
1842
1843 if (err)
1844 return err;
1845
1846 if (num_tcs > 1) {
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001847 /* we need only one Tx queue */
1848 num_tx_queues = 1;
1849
Don Skidmore220fe052013-09-21 01:40:49 +00001850 /* update default Tx ring register index */
Don Skidmore87e70ab2014-01-16 02:30:08 -08001851 adapter->tx_ring[0]->reg_idx = def_q;
Don Skidmore220fe052013-09-21 01:40:49 +00001852
1853 /* we need as many queues as traffic classes */
1854 num_rx_queues = num_tcs;
1855 }
1856
1857 /* if we have a bad config abort request queue reset */
Emil Tantilov2dc571a2014-12-06 09:19:02 +00001858 if ((adapter->num_rx_queues != num_rx_queues) ||
1859 (adapter->num_tx_queues != num_tx_queues)) {
Don Skidmore220fe052013-09-21 01:40:49 +00001860 /* force mailbox timeout to prevent further messages */
1861 hw->mbx.timeout = 0;
1862
1863 /* wait for watchdog to come around and bail us out */
1864 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1865 }
1866
1867 return 0;
1868}
1869
Greg Rose92915f72010-01-09 02:24:10 +00001870static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1871{
Don Skidmore220fe052013-09-21 01:40:49 +00001872 ixgbevf_configure_dcb(adapter);
1873
Don Skidmorede02dec2014-01-16 02:30:09 -08001874 ixgbevf_set_rx_mode(adapter->netdev);
Greg Rose92915f72010-01-09 02:24:10 +00001875
1876 ixgbevf_restore_vlan(adapter);
1877
1878 ixgbevf_configure_tx(adapter);
1879 ixgbevf_configure_rx(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001880}
1881
Greg Rose33bd9f62010-03-19 02:59:52 +00001882static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1883{
1884 /* Only save pre-reset stats if there are some */
1885 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1886 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1887 adapter->stats.base_vfgprc;
1888 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1889 adapter->stats.base_vfgptc;
1890 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1891 adapter->stats.base_vfgorc;
1892 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1893 adapter->stats.base_vfgotc;
1894 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1895 adapter->stats.base_vfmprc;
1896 }
1897}
1898
1899static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1900{
1901 struct ixgbe_hw *hw = &adapter->hw;
1902
1903 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1904 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1905 adapter->stats.last_vfgorc |=
1906 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1907 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1908 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1909 adapter->stats.last_vfgotc |=
1910 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1911 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1912
1913 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1914 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1915 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1916 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1917 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1918}
1919
Alexander Duyck31186782012-07-20 08:09:58 +00001920static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1921{
1922 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck56e94092012-07-20 08:10:03 +00001923 int api[] = { ixgbe_mbox_api_11,
1924 ixgbe_mbox_api_10,
Alexander Duyck31186782012-07-20 08:09:58 +00001925 ixgbe_mbox_api_unknown };
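	/* The list is ordered newest-API-first and terminated by
	 * ixgbe_mbox_api_unknown, so the loop below settles on the highest
	 * mailbox version the PF will accept.
	 */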
1926 int err = 0, idx = 0;
1927
John Fastabend55fdd45b2012-10-01 14:52:20 +00001928 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00001929
1930 while (api[idx] != ixgbe_mbox_api_unknown) {
1931 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1932 if (!err)
1933 break;
1934 idx++;
1935 }
1936
John Fastabend55fdd45b2012-10-01 14:52:20 +00001937 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck31186782012-07-20 08:09:58 +00001938}
1939
Greg Rose795180d2012-04-17 04:29:34 +00001940static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001941{
1942 struct net_device *netdev = adapter->netdev;
1943 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose92915f72010-01-09 02:24:10 +00001944
1945 ixgbevf_configure_msix(adapter);
1946
John Fastabend55fdd45b2012-10-01 14:52:20 +00001947 spin_lock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001948
Greg Rose92fe0bf2012-11-02 05:50:47 +00001949 if (is_valid_ether_addr(hw->mac.addr))
1950 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1951 else
1952 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
Greg Rose92915f72010-01-09 02:24:10 +00001953
John Fastabend55fdd45b2012-10-01 14:52:20 +00001954 spin_unlock_bh(&adapter->mbx_lock);
Alexander Duyck1c55ed72012-05-11 08:33:06 +00001955
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001956 smp_mb__before_atomic();
Greg Rose92915f72010-01-09 02:24:10 +00001957 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1958 ixgbevf_napi_enable_all(adapter);
1959
Emil Tantilovd9bdb572015-01-28 03:21:18 +00001960 /* clear any pending interrupts, may auto mask */
1961 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1962 ixgbevf_irq_enable(adapter);
1963
Greg Rose92915f72010-01-09 02:24:10 +00001964 /* enable transmits */
1965 netif_tx_start_all_queues(netdev);
1966
Greg Rose33bd9f62010-03-19 02:59:52 +00001967 ixgbevf_save_reset_stats(adapter);
1968 ixgbevf_init_last_counter_stats(adapter);
1969
Alexander Duyck4b2cd272012-08-02 01:16:59 +00001970 hw->mac.get_link_status = 1;
Greg Rose92915f72010-01-09 02:24:10 +00001971 mod_timer(&adapter->watchdog_timer, jiffies);
Greg Rose92915f72010-01-09 02:24:10 +00001972}
1973
Greg Rose795180d2012-04-17 04:29:34 +00001974void ixgbevf_up(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00001975{
Greg Rose92915f72010-01-09 02:24:10 +00001976 ixgbevf_configure(adapter);
1977
Greg Rose795180d2012-04-17 04:29:34 +00001978 ixgbevf_up_complete(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00001979}
1980
1981/**
1982 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
Greg Rose92915f72010-01-09 02:24:10 +00001983 * @rx_ring: ring to free buffers from
1984 **/
Emil Tantilov05d063a2014-01-17 18:29:59 -08001985static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
Greg Rose92915f72010-01-09 02:24:10 +00001986{
Emil Tantilovbad17232014-11-21 02:57:15 +00001987 struct device *dev = rx_ring->dev;
Greg Rose92915f72010-01-09 02:24:10 +00001988 unsigned long size;
1989 unsigned int i;
1990
Emil Tantilovbad17232014-11-21 02:57:15 +00001991 /* Free Rx ring sk_buff */
1992 if (rx_ring->skb) {
1993 dev_kfree_skb(rx_ring->skb);
1994 rx_ring->skb = NULL;
1995 }
1996
1997 /* ring already cleared, nothing to do */
Greg Rosec0456c22010-01-22 22:47:18 +00001998 if (!rx_ring->rx_buffer_info)
1999 return;
Greg Rose92915f72010-01-09 02:24:10 +00002000
Emil Tantilovbad17232014-11-21 02:57:15 +00002001 /* Free all the Rx ring pages */
Greg Rose92915f72010-01-09 02:24:10 +00002002 for (i = 0; i < rx_ring->count; i++) {
Emil Tantilovbad17232014-11-21 02:57:15 +00002003 struct ixgbevf_rx_buffer *rx_buffer;
Greg Rose92915f72010-01-09 02:24:10 +00002004
Emil Tantilovbad17232014-11-21 02:57:15 +00002005 rx_buffer = &rx_ring->rx_buffer_info[i];
2006 if (rx_buffer->dma)
2007 dma_unmap_page(dev, rx_buffer->dma,
2008 PAGE_SIZE, DMA_FROM_DEVICE);
2009 rx_buffer->dma = 0;
2010 if (rx_buffer->page)
2011 __free_page(rx_buffer->page);
2012 rx_buffer->page = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00002013 }
2014
2015 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2016 memset(rx_ring->rx_buffer_info, 0, size);
2017
2018 /* Zero out the descriptor ring */
2019 memset(rx_ring->desc, 0, rx_ring->size);
Greg Rose92915f72010-01-09 02:24:10 +00002020}
2021
2022/**
2023 * ixgbevf_clean_tx_ring - Free Tx Buffers
Greg Rose92915f72010-01-09 02:24:10 +00002024 * @tx_ring: ring to be cleaned
2025 **/
Emil Tantilov05d063a2014-01-17 18:29:59 -08002026static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
Greg Rose92915f72010-01-09 02:24:10 +00002027{
2028 struct ixgbevf_tx_buffer *tx_buffer_info;
2029 unsigned long size;
2030 unsigned int i;
2031
Greg Rosec0456c22010-01-22 22:47:18 +00002032 if (!tx_ring->tx_buffer_info)
2033 return;
2034
Greg Rose92915f72010-01-09 02:24:10 +00002035 /* Free all the Tx ring sk_buffs */
Greg Rose92915f72010-01-09 02:24:10 +00002036 for (i = 0; i < tx_ring->count; i++) {
2037 tx_buffer_info = &tx_ring->tx_buffer_info[i];
Alexander Duyck70a10e22012-05-11 08:33:21 +00002038 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
Greg Rose92915f72010-01-09 02:24:10 +00002039 }
2040
2041 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2042 memset(tx_ring->tx_buffer_info, 0, size);
2043
2044 memset(tx_ring->desc, 0, tx_ring->size);
Greg Rose92915f72010-01-09 02:24:10 +00002045}
2046
2047/**
2048 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2049 * @adapter: board private structure
2050 **/
2051static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2052{
2053 int i;
2054
2055 for (i = 0; i < adapter->num_rx_queues; i++)
Emil Tantilov05d063a2014-01-17 18:29:59 -08002056 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002057}
2058
2059/**
2060 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2061 * @adapter: board private structure
2062 **/
2063static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2064{
2065 int i;
2066
2067 for (i = 0; i < adapter->num_tx_queues; i++)
Emil Tantilov05d063a2014-01-17 18:29:59 -08002068 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002069}
2070
2071void ixgbevf_down(struct ixgbevf_adapter *adapter)
2072{
2073 struct net_device *netdev = adapter->netdev;
2074 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmorede02dec2014-01-16 02:30:09 -08002075 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002076
2077 /* signal that we are down to the interrupt handler */
Mark Rustad5b346dc2014-03-04 03:02:18 +00002078 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2079 return; /* do nothing if already down */
Don Skidmore858c3dd2013-10-01 04:33:50 -07002080
2081 /* disable all enabled rx queues */
2082 for (i = 0; i < adapter->num_rx_queues; i++)
Don Skidmore87e70ab2014-01-16 02:30:08 -08002083 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
Greg Rose92915f72010-01-09 02:24:10 +00002084
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002085 usleep_range(10000, 20000);
Greg Rose92915f72010-01-09 02:24:10 +00002086
2087 netif_tx_stop_all_queues(netdev);
2088
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002089 /* call carrier off first to avoid false dev_watchdog timeouts */
2090 netif_carrier_off(netdev);
2091 netif_tx_disable(netdev);
2092
Greg Rose92915f72010-01-09 02:24:10 +00002093 ixgbevf_irq_disable(adapter);
2094
2095 ixgbevf_napi_disable_all(adapter);
2096
2097 del_timer_sync(&adapter->watchdog_timer);
Emil Tantilovd9bdb572015-01-28 03:21:18 +00002098
Greg Rose92915f72010-01-09 02:24:10 +00002099 /* can't call flush scheduled work here because it can deadlock
2100 * if linkwatch_event tries to acquire the rtnl_lock which we are
2101 * holding */
2102 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
2103 msleep(1);
2104
2105 /* disable transmits in the hardware now that interrupts are off */
2106 for (i = 0; i < adapter->num_tx_queues; i++) {
Don Skidmorede02dec2014-01-16 02:30:09 -08002107 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2108
2109 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2110 IXGBE_TXDCTL_SWFLSH);
Greg Rose92915f72010-01-09 02:24:10 +00002111 }
2112
Greg Rose92915f72010-01-09 02:24:10 +00002113 if (!pci_channel_offline(adapter->pdev))
2114 ixgbevf_reset(adapter);
2115
2116 ixgbevf_clean_all_tx_rings(adapter);
2117 ixgbevf_clean_all_rx_rings(adapter);
2118}
2119
2120void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2121{
2122 WARN_ON(in_interrupt());
Greg Rosec0456c22010-01-22 22:47:18 +00002123
Greg Rose92915f72010-01-09 02:24:10 +00002124 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2125 msleep(1);
2126
Alexander Duyck4b2cd272012-08-02 01:16:59 +00002127 ixgbevf_down(adapter);
2128 ixgbevf_up(adapter);
Greg Rose92915f72010-01-09 02:24:10 +00002129
2130 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2131}
2132
2133void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2134{
2135 struct ixgbe_hw *hw = &adapter->hw;
2136 struct net_device *netdev = adapter->netdev;
2137
Don Skidmore798e3812013-10-01 04:33:51 -07002138 if (hw->mac.ops.reset_hw(hw)) {
Greg Rose92915f72010-01-09 02:24:10 +00002139 hw_dbg(hw, "PF still resetting\n");
Don Skidmore798e3812013-10-01 04:33:51 -07002140 } else {
Greg Rose92915f72010-01-09 02:24:10 +00002141 hw->mac.ops.init_hw(hw);
Don Skidmore798e3812013-10-01 04:33:51 -07002142 ixgbevf_negotiate_api(adapter);
2143 }
Greg Rose92915f72010-01-09 02:24:10 +00002144
2145 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2146 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2147 netdev->addr_len);
2148 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2149 netdev->addr_len);
2150 }
2151}
2152
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002153static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2154 int vectors)
Greg Rose92915f72010-01-09 02:24:10 +00002155{
Emil Tantilova5f93372012-11-13 04:03:17 +00002156 int vector_threshold;
Greg Rose92915f72010-01-09 02:24:10 +00002157
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002158 /* We'll want at least 2 (vector_threshold):
2159 * 1) TxQ[0] + RxQ[0] handler
2160 * 2) Other (Link Status Change, etc.)
Greg Rose92915f72010-01-09 02:24:10 +00002161 */
2162 vector_threshold = MIN_MSIX_COUNT;
2163
2164 /* The more we get, the more we will assign to Tx/Rx Cleanup
2165 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2166 * Right now, we simply care about how many we'll get; we'll
2167 * set them up later while requesting IRQs.
2168 */
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002169 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2170 vector_threshold, vectors);
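	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted, somewhere in [vector_threshold, vectors], or a negative
	 * errno if even the minimum could not be satisfied.
	 */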
Greg Rose92915f72010-01-09 02:24:10 +00002171
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002172 if (vectors < 0) {
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002173 dev_err(&adapter->pdev->dev,
2174 "Unable to allocate MSI-X interrupts\n");
Greg Rose92915f72010-01-09 02:24:10 +00002175 kfree(adapter->msix_entries);
2176 adapter->msix_entries = NULL;
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002177 return vectors;
Greg Rose92915f72010-01-09 02:24:10 +00002178 }
Greg Rosedee847f2012-11-02 05:50:57 +00002179
Alexander Gordeev5c1e35882014-02-18 11:11:46 +01002180 /* Adjust for only the vectors we'll use, which is minimum
2181 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2182 * vectors we were allocated.
2183 */
2184 adapter->num_msix_vectors = vectors;
2185
2186 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002187}
2188
Ben Hutchings49ce9c22012-07-10 10:56:00 +00002189/**
2190 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
Greg Rose92915f72010-01-09 02:24:10 +00002191 * @adapter: board private structure to initialize
2192 *
2193 * This is the top level queue allocation routine. The order here is very
2194 * important, starting with the largest set of features turned on at once,
2195 * and ending with the smallest set of features. This way large combinations
2196 * can be allocated if they're turned on, and smaller combinations are the
2197 * fallthrough conditions.
2198 *
2199 **/
2200static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2201{
Don Skidmore220fe052013-09-21 01:40:49 +00002202 struct ixgbe_hw *hw = &adapter->hw;
2203 unsigned int def_q = 0;
2204 unsigned int num_tcs = 0;
2205 int err;
2206
Greg Rose92915f72010-01-09 02:24:10 +00002207 /* Start with base case */
2208 adapter->num_rx_queues = 1;
2209 adapter->num_tx_queues = 1;
Don Skidmore220fe052013-09-21 01:40:49 +00002210
2211 spin_lock_bh(&adapter->mbx_lock);
2212
2213 /* fetch queue configuration from the PF */
2214 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2215
2216 spin_unlock_bh(&adapter->mbx_lock);
2217
2218 if (err)
2219 return;
2220
2221 /* we need as many queues as traffic classes */
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002222 if (num_tcs > 1) {
Don Skidmore220fe052013-09-21 01:40:49 +00002223 adapter->num_rx_queues = num_tcs;
Emil Tantilov2dc571a2014-12-06 09:19:02 +00002224 } else {
2225 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
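		/* e.g. if IXGBEVF_MAX_RSS_QUEUES is 2, a 4-CPU system is
		 * still capped at rss == 2, while a single CPU gives 1 */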
2226
2227 switch (hw->api_version) {
2228 case ixgbe_mbox_api_11:
2229 adapter->num_rx_queues = rss;
2230 adapter->num_tx_queues = rss;
2231 default:
2232 break;
2233 }
2234 }
Greg Rose92915f72010-01-09 02:24:10 +00002235}
2236
2237/**
2238 * ixgbevf_alloc_queues - Allocate memory for all rings
2239 * @adapter: board private structure to initialize
2240 *
2241 * We allocate one ring per queue at run-time since we don't know the
2242 * number of queues at compile-time.
2244 **/
2245static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2246{
Don Skidmore87e70ab2014-01-16 02:30:08 -08002247 struct ixgbevf_ring *ring;
2248 int rx = 0, tx = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002249
Don Skidmore87e70ab2014-01-16 02:30:08 -08002250 for (; tx < adapter->num_tx_queues; tx++) {
2251 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2252 if (!ring)
2253 goto err_allocation;
Greg Rose92915f72010-01-09 02:24:10 +00002254
Don Skidmore87e70ab2014-01-16 02:30:08 -08002255 ring->dev = &adapter->pdev->dev;
2256 ring->netdev = adapter->netdev;
2257 ring->count = adapter->tx_ring_count;
2258 ring->queue_index = tx;
2259 ring->reg_idx = tx;
Greg Rose92915f72010-01-09 02:24:10 +00002260
Don Skidmore87e70ab2014-01-16 02:30:08 -08002261 adapter->tx_ring[tx] = ring;
Greg Rose92915f72010-01-09 02:24:10 +00002262 }
2263
Don Skidmore87e70ab2014-01-16 02:30:08 -08002264 for (; rx < adapter->num_rx_queues; rx++) {
2265 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2266 if (!ring)
2267 goto err_allocation;
2268
2269 ring->dev = &adapter->pdev->dev;
2270 ring->netdev = adapter->netdev;
2271
2272 ring->count = adapter->rx_ring_count;
2273 ring->queue_index = rx;
2274 ring->reg_idx = rx;
2275
2276 adapter->rx_ring[rx] = ring;
Greg Rose92915f72010-01-09 02:24:10 +00002277 }
2278
2279 return 0;
2280
Don Skidmore87e70ab2014-01-16 02:30:08 -08002281err_allocation:
2282 while (tx) {
2283 kfree(adapter->tx_ring[--tx]);
2284 adapter->tx_ring[tx] = NULL;
2285 }
2286
2287 while (rx) {
2288 kfree(adapter->rx_ring[--rx]);
2289 adapter->rx_ring[rx] = NULL;
2290 }
Greg Rose92915f72010-01-09 02:24:10 +00002291 return -ENOMEM;
2292}
2293
2294/**
2295 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2296 * @adapter: board private structure to initialize
2297 *
2298 * Attempt to configure the interrupts using the best available
2299 * capabilities of the hardware and the kernel.
2300 **/
2301static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2302{
Greg Rose91e2b892012-10-03 00:57:23 +00002303 struct net_device *netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00002304 int err = 0;
2305 int vector, v_budget;
2306
2307 /*
2308 * It's easy to be greedy for MSI-X vectors, but it really
2309 * doesn't do us much good if we have a lot more vectors
2310 * than CPUs. So let's be conservative and only ask for
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002311 * (roughly) the same number of vectors as there are CPUs.
2312 * The default is to use pairs of vectors.
Greg Rose92915f72010-01-09 02:24:10 +00002313 */
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002314 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2315 v_budget = min_t(int, v_budget, num_online_cpus());
2316 v_budget += NON_Q_VECTORS;
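	/* Worked example (assuming NON_Q_VECTORS == 1): with 2 Tx and 2 Rx
	 * queues on a 4-CPU system, v_budget = min(max(2, 2), 4) + 1 = 3.
	 */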
Greg Rose92915f72010-01-09 02:24:10 +00002317
2318 /* A failure in MSI-X entry allocation isn't fatal, but it does
2319 * mean we disable MSI-X capabilities of the adapter. */
2320 adapter->msix_entries = kcalloc(v_budget,
2321 sizeof(struct msix_entry), GFP_KERNEL);
2322 if (!adapter->msix_entries) {
2323 err = -ENOMEM;
2324 goto out;
2325 }
2326
2327 for (vector = 0; vector < v_budget; vector++)
2328 adapter->msix_entries[vector].entry = vector;
2329
Jakub Kicinskie45dd5f2012-11-13 04:03:16 +00002330 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2331 if (err)
2332 goto out;
Greg Rose92915f72010-01-09 02:24:10 +00002333
Greg Rose91e2b892012-10-03 00:57:23 +00002334 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2335 if (err)
2336 goto out;
2337
2338 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2339
Greg Rose92915f72010-01-09 02:24:10 +00002340out:
2341 return err;
2342}
2343
2344/**
2345 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2346 * @adapter: board private structure to initialize
2347 *
2348 * We allocate one q_vector per queue interrupt. If allocation fails we
2349 * return -ENOMEM.
2350 **/
2351static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2352{
2353 int q_idx, num_q_vectors;
2354 struct ixgbevf_q_vector *q_vector;
Greg Rose92915f72010-01-09 02:24:10 +00002355
2356 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00002357
2358 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2359 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2360 if (!q_vector)
2361 goto err_out;
2362 q_vector->adapter = adapter;
2363 q_vector->v_idx = q_idx;
Alexander Duyckfa71ae22012-05-11 08:32:50 +00002364 netif_napi_add(adapter->netdev, &q_vector->napi,
2365 ixgbevf_poll, 64);
Jacob Kellerc777cdf2013-09-21 06:24:20 +00002366#ifdef CONFIG_NET_RX_BUSY_POLL
2367 napi_hash_add(&q_vector->napi);
2368#endif
Greg Rose92915f72010-01-09 02:24:10 +00002369 adapter->q_vector[q_idx] = q_vector;
2370 }
2371
2372 return 0;
2373
2374err_out:
2375 while (q_idx) {
2376 q_idx--;
2377 q_vector = adapter->q_vector[q_idx];
Jacob Kellerc777cdf2013-09-21 06:24:20 +00002378#ifdef CONFIG_NET_RX_BUSY_POLL
2379 napi_hash_del(&q_vector->napi);
2380#endif
Greg Rose92915f72010-01-09 02:24:10 +00002381 netif_napi_del(&q_vector->napi);
2382 kfree(q_vector);
2383 adapter->q_vector[q_idx] = NULL;
2384 }
2385 return -ENOMEM;
2386}
2387
2388/**
2389 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2390 * @adapter: board private structure to initialize
2391 *
2392 * This function frees the memory allocated to the q_vectors. In addition if
2393 * NAPI is enabled it will delete any references to the NAPI struct prior
2394 * to freeing the q_vector.
2395 **/
2396static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2397{
John Fastabendf4477702012-09-16 08:19:46 +00002398 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Greg Rose92915f72010-01-09 02:24:10 +00002399
2400 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2401 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2402
2403 adapter->q_vector[q_idx] = NULL;
Jacob Kellerc777cdf2013-09-21 06:24:20 +00002404#ifdef CONFIG_NET_RX_BUSY_POLL
2405 napi_hash_del(&q_vector->napi);
2406#endif
John Fastabendf4477702012-09-16 08:19:46 +00002407 netif_napi_del(&q_vector->napi);
Greg Rose92915f72010-01-09 02:24:10 +00002408 kfree(q_vector);
2409 }
2410}
2411
2412/**
2413 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2414 * @adapter: board private structure
2415 *
2416 **/
2417static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2418{
2419 pci_disable_msix(adapter->pdev);
2420 kfree(adapter->msix_entries);
2421 adapter->msix_entries = NULL;
Greg Rose92915f72010-01-09 02:24:10 +00002422}
2423
2424/**
2425 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2426 * @adapter: board private structure to initialize
2427 *
2428 **/
2429static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2430{
2431 int err;
2432
2433 /* Number of supported queues */
2434 ixgbevf_set_num_queues(adapter);
2435
2436 err = ixgbevf_set_interrupt_capability(adapter);
2437 if (err) {
2438 hw_dbg(&adapter->hw,
2439 "Unable to setup interrupt capabilities\n");
2440 goto err_set_interrupt;
2441 }
2442
2443 err = ixgbevf_alloc_q_vectors(adapter);
2444 if (err) {
2445 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2446 "vectors\n");
2447 goto err_alloc_q_vectors;
2448 }
2449
2450 err = ixgbevf_alloc_queues(adapter);
2451 if (err) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00002452 pr_err("Unable to allocate memory for queues\n");
Greg Rose92915f72010-01-09 02:24:10 +00002453 goto err_alloc_queues;
2454 }
2455
2456 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2457 "Tx Queue count = %u\n",
2458 (adapter->num_rx_queues > 1) ? "Enabled" :
2459 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2460
2461 set_bit(__IXGBEVF_DOWN, &adapter->state);
2462
2463 return 0;
2464err_alloc_queues:
2465 ixgbevf_free_q_vectors(adapter);
2466err_alloc_q_vectors:
2467 ixgbevf_reset_interrupt_capability(adapter);
2468err_set_interrupt:
2469 return err;
2470}
2471
2472/**
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002473 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2474 * @adapter: board private structure to clear interrupt scheme on
2475 *
2476 * We go through and clear interrupt specific resources and reset the structure
2477 * to pre-load conditions
2478 **/
2479static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2480{
Don Skidmore87e70ab2014-01-16 02:30:08 -08002481 int i;
2482
2483 for (i = 0; i < adapter->num_tx_queues; i++) {
2484 kfree(adapter->tx_ring[i]);
2485 adapter->tx_ring[i] = NULL;
2486 }
2487 for (i = 0; i < adapter->num_rx_queues; i++) {
2488 kfree(adapter->rx_ring[i]);
2489 adapter->rx_ring[i] = NULL;
2490 }
2491
Alexander Duyck0ac1e8c2012-05-11 08:33:26 +00002492 adapter->num_tx_queues = 0;
2493 adapter->num_rx_queues = 0;
2494
2495 ixgbevf_free_q_vectors(adapter);
2496 ixgbevf_reset_interrupt_capability(adapter);
2497}
2498
2499/**
Greg Rose92915f72010-01-09 02:24:10 +00002500 * ixgbevf_sw_init - Initialize general software structures
2501 * (struct ixgbevf_adapter)
2502 * @adapter: board private structure to initialize
2503 *
2504 * ixgbevf_sw_init initializes the Adapter private data structure.
2505 * Fields are initialized based on PCI device information and
2506 * OS network device settings (MTU size).
2507 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05002508static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
Greg Rose92915f72010-01-09 02:24:10 +00002509{
2510 struct ixgbe_hw *hw = &adapter->hw;
2511 struct pci_dev *pdev = adapter->pdev;
Greg Rosee1941a72013-02-13 03:02:05 +00002512 struct net_device *netdev = adapter->netdev;
Greg Rose92915f72010-01-09 02:24:10 +00002513 int err;
2514
2515 /* PCI config space info */
2516
2517 hw->vendor_id = pdev->vendor;
2518 hw->device_id = pdev->device;
Sergei Shtylyovff938e42011-02-28 11:57:33 -08002519 hw->revision_id = pdev->revision;
Greg Rose92915f72010-01-09 02:24:10 +00002520 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2521 hw->subsystem_device_id = pdev->subsystem_device;
2522
2523 hw->mbx.ops.init_params(hw);
Alexander Duyck56e94092012-07-20 08:10:03 +00002524
2525 /* assume legacy case in which PF would only give VF 2 queues */
2526 hw->mac.max_tx_queues = 2;
2527 hw->mac.max_rx_queues = 2;
2528
Don Skidmore798e3812013-10-01 04:33:51 -07002529 /* lock to protect mailbox accesses */
2530 spin_lock_init(&adapter->mbx_lock);
2531
Greg Rose92915f72010-01-09 02:24:10 +00002532 err = hw->mac.ops.reset_hw(hw);
2533 if (err) {
2534 dev_info(&pdev->dev,
Greg Rosee1941a72013-02-13 03:02:05 +00002535 "PF still in reset state. Is the PF interface up?\n");
Greg Rose92915f72010-01-09 02:24:10 +00002536 } else {
2537 err = hw->mac.ops.init_hw(hw);
2538 if (err) {
Jeff Kirsherdbd96362011-10-21 19:38:18 +00002539 pr_err("init_hw failed: %d\n", err);
Greg Rose92915f72010-01-09 02:24:10 +00002540 goto out;
2541 }
Don Skidmore798e3812013-10-01 04:33:51 -07002542 ixgbevf_negotiate_api(adapter);
Greg Rosee1941a72013-02-13 03:02:05 +00002543 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2544 if (err)
2545 dev_info(&pdev->dev, "Error reading MAC address\n");
2546 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2547 dev_info(&pdev->dev,
2548 "MAC address not assigned by administrator.\n");
2549 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2550 }
2551
2552 if (!is_valid_ether_addr(netdev->dev_addr)) {
2553 dev_info(&pdev->dev, "Assigning random MAC address\n");
2554 eth_hw_addr_random(netdev);
2555 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
Greg Rose92915f72010-01-09 02:24:10 +00002556 }
2557
2558 /* Enable dynamic interrupt throttling rates */
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002559 adapter->rx_itr_setting = 1;
2560 adapter->tx_itr_setting = 1;
Greg Rose92915f72010-01-09 02:24:10 +00002561
Greg Rose92915f72010-01-09 02:24:10 +00002562 /* set default ring sizes */
2563 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2564 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2565
Greg Rose92915f72010-01-09 02:24:10 +00002566 set_bit(__IXGBEVF_DOWN, &adapter->state);
Danny Kukawka1a0d6ae2012-02-09 09:48:54 +00002567 return 0;
Greg Rose92915f72010-01-09 02:24:10 +00002568
2569out:
2570 return err;
2571}
2572
Greg Rose92915f72010-01-09 02:24:10 +00002573#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2574 { \
2575 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2576 if (current_counter < last_counter) \
2577 counter += 0x100000000LL; \
2578 last_counter = current_counter; \
2579 counter &= 0xFFFFFFFF00000000LL; \
2580 counter |= current_counter; \
2581 }
2582
2583#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2584 { \
2585 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2586 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2587 u64 current_counter = (current_counter_msb << 32) | \
2588 current_counter_lsb; \
2589 if (current_counter < last_counter) \
2590 counter += 0x1000000000LL; \
2591 last_counter = current_counter; \
2592 counter &= 0xFFFFFFF000000000LL; \
2593 counter |= current_counter; \
2594 }
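/* Illustrative rollover case for the 36-bit variant: if last_counter was
 * 0xFFFFFFF00 and the register now reads 0x000000010, current < last, so
 * 0x1000000000 is added and the low 36 bits are replaced with the new
 * reading.
 */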
2595/**
2596 * ixgbevf_update_stats - Update the board statistics counters.
2597 * @adapter: board private structure
2598 **/
2599void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2600{
2601 struct ixgbe_hw *hw = &adapter->hw;
Greg Rose55fb2772012-11-06 05:53:32 +00002602 int i;
Greg Rose92915f72010-01-09 02:24:10 +00002603
Greg Rose088245a2013-01-04 07:37:31 +00002604 if (!adapter->link_up)
2605 return;
2606
Greg Rose92915f72010-01-09 02:24:10 +00002607 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2608 adapter->stats.vfgprc);
2609 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2610 adapter->stats.vfgptc);
2611 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2612 adapter->stats.last_vfgorc,
2613 adapter->stats.vfgorc);
2614 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2615 adapter->stats.last_vfgotc,
2616 adapter->stats.vfgotc);
2617 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2618 adapter->stats.vfmprc);
Greg Rose55fb2772012-11-06 05:53:32 +00002619
2620 for (i = 0; i < adapter->num_rx_queues; i++) {
2621 adapter->hw_csum_rx_error +=
Don Skidmore87e70ab2014-01-16 02:30:08 -08002622 adapter->rx_ring[i]->hw_csum_rx_error;
Don Skidmore87e70ab2014-01-16 02:30:08 -08002623 adapter->rx_ring[i]->hw_csum_rx_error = 0;
Greg Rose55fb2772012-11-06 05:53:32 +00002624 }
Greg Rose92915f72010-01-09 02:24:10 +00002625}
2626
2627/**
2628 * ixgbevf_watchdog - Timer Call-back
2629 * @data: pointer to adapter cast into an unsigned long
2630 **/
2631static void ixgbevf_watchdog(unsigned long data)
2632{
2633 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2634 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002635 u32 eics = 0;
Greg Rose92915f72010-01-09 02:24:10 +00002636 int i;
2637
2638 /*
2639 * Do the watchdog outside of interrupt context due to the lovely
2640 * delays that some of the newer hardware requires
2641 */
2642
2643 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2644 goto watchdog_short_circuit;
2645
2646 /* get one bit for every active tx/rx interrupt vector */
2647 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2648 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
Alexander Duyck6b43c442012-05-11 08:32:45 +00002649 if (qv->rx.ring || qv->tx.ring)
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002650 eics |= 1 << i;
Greg Rose92915f72010-01-09 02:24:10 +00002651 }
2652
Alexander Duyck5f3600e2012-05-11 08:32:55 +00002653 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
Greg Rose92915f72010-01-09 02:24:10 +00002654
2655watchdog_short_circuit:
2656 schedule_work(&adapter->watchdog_task);
2657}
2658
2659/**
2660 * ixgbevf_tx_timeout - Respond to a Tx Hang
2661 * @netdev: network interface device structure
2662 **/
2663static void ixgbevf_tx_timeout(struct net_device *netdev)
2664{
2665 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2666
2667 /* Do the reset outside of interrupt context */
2668 schedule_work(&adapter->reset_task);
2669}
2670
2671static void ixgbevf_reset_task(struct work_struct *work)
2672{
2673 struct ixgbevf_adapter *adapter;
2674 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2675
2676 /* If we're already down or resetting, just bail */
2677 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
Mark Rustad2e7cfbd2014-03-04 03:02:13 +00002678 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
Greg Rose92915f72010-01-09 02:24:10 +00002679 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2680 return;
2681
2682 adapter->tx_timeout_count++;
2683
2684 ixgbevf_reinit_locked(adapter);
2685}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}
	ixgbevf_queue_reset_subtask(adapter);

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
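
/* Worked example, not driver code: the sizing math used by
 * ixgbevf_setup_tx_resources() above. Each advanced descriptor is
 * 16 bytes and the ring must occupy whole 4 KiB pages, hence the
 * ALIGN(size, 4096): 512 descriptors * 16 = 8192 bytes (already
 * aligned), while 680 * 16 = 10880 rounds up to 12288. The helper
 * name below is a hypothetical restatement of that arithmetic.
 */
static unsigned int demo_ring_bytes(unsigned int count, unsigned int desc_size)
{
	unsigned int size = count * desc_size;

	return (size + 4095) & ~4095U;	/* same effect as ALIGN(size, 4096) */
}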

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_up_complete(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
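
/* Sketch, not driver code: the bit packing that callers of
 * ixgbevf_tx_ctxtdesc() perform. vlan_macip_lens carries the 802.1Q
 * tag in its top 16 bits, the MAC header length shifted by
 * IXGBE_ADVTXD_MACLEN_SHIFT, and the IP header length in the low
 * bits. The shift values below are illustrative stand-ins, not the
 * real IXGBE_* constants defined elsewhere in the driver.
 */
#define DEMO_MACLEN_SHIFT	9	/* stand-in for IXGBE_ADVTXD_MACLEN_SHIFT */
#define DEMO_VLAN_SHIFT		16	/* stand-in for IXGBE_TX_FLAGS_VLAN_SHIFT */

static u32 demo_vlan_macip_lens(u16 vlan, u8 maclen, u16 iplen)
{
	return ((u32)vlan << DEMO_VLAN_SHIFT) |
	       ((u32)maclen << DEMO_MACLEN_SHIFT) |
	       iplen;
}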

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header length: transport offset plus the TCP header */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
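
/* Worked example, not driver code: why ixgbevf_tso() above adds
 * (gso_segs - 1) * hdr_len to the bytecount. A TSO skb carries its
 * headers once, but every wire segment re-carries them. For a
 * 9000-byte payload with 54 bytes of headers and an MSS of 1448:
 * gso_segs = 7, so bytes on the wire = 9054 + 6 * 54 = 9378.
 */
static unsigned int demo_tso_wire_bytes(unsigned int skb_len,
					unsigned int gso_segs,
					unsigned int hdr_len)
{
	return skb_len + (gso_segs - 1) * hdr_len;
}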

static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (first->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
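
/* Illustrative sketch, not driver code: how ixgbevf_tx_map() above
 * slices one DMA-mapped buffer into descriptor-sized chunks. A single
 * data descriptor can carry at most IXGBE_MAX_DATA_PER_TXD bytes, so
 * a buffer larger than that consumes several descriptors; e.g. with a
 * 16 KiB cap, a 40 KiB fragment needs three (16K + 16K + 8K). The cap
 * value below is a stand-in for the real constant.
 */
#define DEMO_MAX_DATA_PER_TXD	16384u	/* stand-in for IXGBE_MAX_DATA_PER_TXD */

static unsigned int demo_descs_for_len(unsigned int size)
{
	unsigned int descs = 1;	/* final descriptor takes the remainder */

	while (size > DEMO_MAX_DATA_PER_TXD) {
		size -= DEMO_MAX_DATA_PER_TXD;
		descs++;
	}
	return descs;
}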

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
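
/* Sketch, not driver code: the stop-then-recheck pattern that
 * __ixgbevf_maybe_stop_tx() above implements. The transmit path stops
 * the queue first, issues a full barrier so the stopped state is
 * visible before re-reading the free-descriptor count, and only then
 * decides; if the clean-up path freed descriptors in that window, the
 * queue is restarted ("a reprieve") instead of being left stopped with
 * no one to wake it. In outline:
 *
 *	stop queue
 *	smp_mb()
 *	if (descriptors still scarce)
 *		return -EBUSY;	// stay stopped; cleanup will wake us
 *	restart queue		// raced with cleanup, keep going
 */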

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
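
/* Worked example, not driver code: the descriptor budget computed in
 * ixgbevf_xmit_frame() above. With a hypothetical 16 KiB
 * per-descriptor cap, a packet with a 1 KiB linear part and two
 * 32 KiB fragments needs 1 + 2 + 2 = 5 data descriptors; the
 * "count + 3" passed to ixgbevf_maybe_stop_tx() then covers one
 * context descriptor plus the 2-descriptor gap between tail and head.
 */
static u16 demo_txd_use_count(unsigned int bytes)
{
	/* stand-in for TXD_USE_COUNT(): DIV_ROUND_UP(bytes, cap) */
	return (bytes + 16384 - 1) / 16384;
}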

/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}
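
/* Worked example, not driver code: the bound checked by
 * ixgbevf_change_mtu() above. For a requested MTU of 1500,
 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518, which
 * must fit the PF-imposed limit: MAXIMUM_ETHERNET_VLAN_SIZE for the
 * legacy mailbox API on 82599, the jumbo frame size otherwise.
 */
static bool demo_mtu_in_range(int new_mtu, int max_possible_frame)
{
	int max_frame = new_mtu + 14 + 4;	/* ETH_HLEN + ETH_FCS_LEN */

	return new_mtu >= 68 && max_frame <= max_possible_frame;
}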

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
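
/* Sketch, not part of this function: the writer side that pairs with
 * the u64_stats_fetch_begin_irq()/retry loops above. On the hot path
 * each ring updates its counters inside a u64_stats write section so
 * that 32-bit readers see a consistent bytes/packets pair (the local
 * variable names here are assumptions):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.bytes += total_bytes;
 *	ring->stats.packets += total_packets;
 *	u64_stats_update_end(&ring->syncp);
 *
 * The retry loop in ixgbevf_get_stats() simply re-reads if a writer
 * was active in between.
 */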

static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open = ixgbevf_open,
	.ndo_stop = ixgbevf_close,
	.ndo_start_xmit = ixgbevf_xmit_frame,
	.ndo_set_rx_mode = ixgbevf_set_rx_mode,
	.ndo_get_stats64 = ixgbevf_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = ixgbevf_set_mac,
	.ndo_change_mtu = ixgbevf_change_mtu,
	.ndo_tx_timeout = ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = ixgbevf_busy_poll_recv,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ixgbevf_netpoll,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
	set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 */
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */