Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
Greg Rosedc641b72013-12-18 13:45:51 +00004 * Copyright(c) 2013 - 2014 Intel Corporation.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
Greg Rosedc641b72013-12-18 13:45:51 +000015 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000017 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
Mitch Williams1c112a62014-04-04 04:43:06 +000027#include <linux/prefetch.h>
Mitch Williamsa132af22015-01-24 09:58:35 +000028#include <net/busy_poll.h>
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000029#include "i40e.h"
Jesse Brandeburg206812b2014-02-12 01:45:33 +000030#include "i40e_prototype.h"
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000031
32static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
33 u32 td_tag)
34{
35 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
36 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
37 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
38 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
39 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
40}
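/* Illustrative sketch (not part of the driver): a transmit path would
 * typically compose a data descriptor's quad word 1 with something like
 *
 *	td_cmd = I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS;
 *	tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0,
 *						  skb_headlen(skb), 0);
 *
 * which yields DTYPE_DATA plus the command, header offset, buffer size
 * and L2 tag fields shifted into their QW1 positions.
 */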
41
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +000042#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +000043#define I40E_FD_CLEAN_DELAY 10
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000044/**
45 * i40e_program_fdir_filter - Program a Flow Director filter
Joseph Gasparakis17a73f62014-02-12 01:45:30 +000046 * @fdir_data: flow director filter data (the filter parameters)
47 * @raw_packet: the pre-allocated packet buffer for FDir
Jeff Kirsherb40c82e2015-02-27 09:18:34 +000048 * @pf: The PF pointer
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000049 * @add: True for add/update, False for remove
50 **/
Joseph Gasparakis17a73f62014-02-12 01:45:30 +000051int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000052 struct i40e_pf *pf, bool add)
53{
54 struct i40e_filter_program_desc *fdir_desc;
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +000055 struct i40e_tx_buffer *tx_buf, *first;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000056 struct i40e_tx_desc *tx_desc;
57 struct i40e_ring *tx_ring;
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +000058 unsigned int fpt, dcc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000059 struct i40e_vsi *vsi;
60 struct device *dev;
61 dma_addr_t dma;
62 u32 td_cmd = 0;
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +000063 u16 delay = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000064 u16 i;
65
66 /* find existing FDIR VSI */
67 vsi = NULL;
Mitch Williams505682c2014-05-20 08:01:37 +000068 for (i = 0; i < pf->num_alloc_vsi; i++)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000069 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
70 vsi = pf->vsi[i];
71 if (!vsi)
72 return -ENOENT;
73
Alexander Duyck9f65e152013-09-28 06:00:58 +000074 tx_ring = vsi->tx_rings[0];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000075 dev = tx_ring->dev;
76
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +000077 /* we need two descriptors to add/del a filter and we can wait */
78 do {
79 if (I40E_DESC_UNUSED(tx_ring) > 1)
80 break;
81 msleep_interruptible(1);
82 delay++;
83 } while (delay < I40E_FD_CLEAN_DELAY);
84
85 if (!(I40E_DESC_UNUSED(tx_ring) > 1))
86 return -EAGAIN;
87
Joseph Gasparakis17a73f62014-02-12 01:45:30 +000088 dma = dma_map_single(dev, raw_packet,
89 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +000090 if (dma_mapping_error(dev, dma))
91 goto dma_fail;
92
93 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +000094 i = tx_ring->next_to_use;
95 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +000096 first = &tx_ring->tx_bi[i];
97 memset(first, 0, sizeof(struct i40e_tx_buffer));
Alexander Duyckfc4ac672013-09-28 06:00:22 +000098
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +000099 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000100
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000101 fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000103
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000104 fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105 I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000106
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000107 fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108 I40E_TXD_FLTR_QW0_PCTYPE_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000109
110 /* Use LAN VSI Id if not programmed by user */
111 if (fdir_data->dest_vsi == 0)
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000112 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
113 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000114 else
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000115 fpt |= ((u32)fdir_data->dest_vsi <<
116 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117 I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000118
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000119 dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000120
121 if (add)
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000122 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
123 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000124 else
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000125 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
126 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000127
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000128 dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129 I40E_TXD_FLTR_QW1_DEST_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000130
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000131 dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132 I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000133
134 if (fdir_data->cnt_index != 0) {
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000135 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136 dcc |= ((u32)fdir_data->cnt_index <<
137 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +0000138 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000139 }
140
Jesse Brandeburg99753ea2014-06-04 04:22:49 +0000141 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000143 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000144 fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
145
146 /* Now program a dummy descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +0000147 i = tx_ring->next_to_use;
148 tx_desc = I40E_TX_DESC(tx_ring, i);
Anjali Singhai Jain298deef2013-11-28 06:39:33 +0000149 tx_buf = &tx_ring->tx_bi[i];
Alexander Duyckfc4ac672013-09-28 06:00:22 +0000150
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000151 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
152
153 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000154
Anjali Singhai Jain298deef2013-11-28 06:39:33 +0000155 /* record length, and DMA address */
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000156 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
Anjali Singhai Jain298deef2013-11-28 06:39:33 +0000157 dma_unmap_addr_set(tx_buf, dma, dma);
158
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000159 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgeaefbd02013-09-28 07:13:54 +0000160 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000161
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000162 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163 tx_buf->raw_buf = (void *)raw_packet;
164
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000165 tx_desc->cmd_type_offset_bsz =
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000166 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000167
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000168 /* Force memory writes to complete before letting h/w
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000169 * know there are new descriptors to fetch.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000170 */
171 wmb();
172
Alexander Duyckfc4ac672013-09-28 06:00:22 +0000173 /* Mark the data descriptor to be watched */
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000174 first->next_to_watch = tx_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +0000175
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000176 writel(tx_ring->next_to_use, tx_ring->tail);
177 return 0;
178
179dma_fail:
180 return -1;
181}
182
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000183#define IP_HEADER_OFFSET 14
184#define I40E_UDPIP_DUMMY_PACKET_LEN 42
185/**
186 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187 * @vsi: pointer to the targeted VSI
188 * @fd_data: the flow director data required for the FDir descriptor
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000189 * @add: true adds a filter, false removes it
190 *
191 * Returns 0 if the filters were successfully added or removed
192 **/
193static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
194 struct i40e_fdir_filter *fd_data,
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000195 bool add)
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000196{
197 struct i40e_pf *pf = vsi->back;
198 struct udphdr *udp;
199 struct iphdr *ip;
200 bool err = false;
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000201 u8 *raw_packet;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000202 int ret;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000203 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
206
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000207 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
208 if (!raw_packet)
209 return -ENOMEM;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000210 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
211
212 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 + sizeof(struct iphdr));
215
216 ip->daddr = fd_data->dst_ip[0];
217 udp->dest = fd_data->dst_port;
218 ip->saddr = fd_data->src_ip[0];
219 udp->source = fd_data->src_port;
220
Kevin Scottb2d36c02014-04-09 05:58:59 +0000221 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
223 if (ret) {
224 dev_info(&pf->pdev->dev,
Carolyn Wybornye99bdd32014-07-09 07:46:12 +0000225 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
226 fd_data->pctype, fd_data->fd_id, ret);
Kevin Scottb2d36c02014-04-09 05:58:59 +0000227 err = true;
Anjali Singhai Jain4205d372015-02-27 09:15:27 +0000228 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
Anjali Singhai Jainf7233c52014-07-09 07:46:16 +0000229 if (add)
230 dev_info(&pf->pdev->dev,
231 "Filter OK for PCTYPE %d loc = %d\n",
232 fd_data->pctype, fd_data->fd_id);
233 else
234 dev_info(&pf->pdev->dev,
235 "Filter deleted for PCTYPE %d loc = %d\n",
236 fd_data->pctype, fd_data->fd_id);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000237 }
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000238 return err ? -EOPNOTSUPP : 0;
239}
240
241#define I40E_TCPIP_DUMMY_PACKET_LEN 54
242/**
243 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
244 * @vsi: pointer to the targeted VSI
245 * @fd_data: the flow director data required for the FDir descriptor
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000246 * @add: true adds a filter, false removes it
247 *
248 * Returns 0 if the filters were successfully added or removed
249 **/
250static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
251 struct i40e_fdir_filter *fd_data,
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000252 bool add)
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000253{
254 struct i40e_pf *pf = vsi->back;
255 struct tcphdr *tcp;
256 struct iphdr *ip;
257 bool err = false;
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000258 u8 *raw_packet;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000259 int ret;
260 /* Dummy packet */
261 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
262 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
263 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
264 0x0, 0x72, 0, 0, 0, 0};
265
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000266 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
267 if (!raw_packet)
268 return -ENOMEM;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000269 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
270
271 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
272 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
273 + sizeof(struct iphdr));
274
275 ip->daddr = fd_data->dst_ip[0];
276 tcp->dest = fd_data->dst_port;
277 ip->saddr = fd_data->src_ip[0];
278 tcp->source = fd_data->src_port;
279
280 if (add) {
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +0000281 pf->fd_tcp_rule++;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000282 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
Anjali Singhai Jain2e4875e2015-04-16 20:06:06 -0400283 if (I40E_DEBUG_FD & pf->hw.debug_mask)
284 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000285 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
286 }
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +0000287 } else {
288 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
289 (pf->fd_tcp_rule - 1) : 0;
290 if (pf->fd_tcp_rule == 0) {
291 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
Anjali Singhai Jain2e4875e2015-04-16 20:06:06 -0400292 if (I40E_DEBUG_FD & pf->hw.debug_mask)
293 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +0000294 }
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000295 }
296
Kevin Scottb2d36c02014-04-09 05:58:59 +0000297 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000298 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
299
300 if (ret) {
301 dev_info(&pf->pdev->dev,
Carolyn Wybornye99bdd32014-07-09 07:46:12 +0000302 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
303 fd_data->pctype, fd_data->fd_id, ret);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000304 err = true;
Anjali Singhai Jain4205d372015-02-27 09:15:27 +0000305 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
Anjali Singhai Jainf7233c52014-07-09 07:46:16 +0000306 if (add)
 307 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
308 fd_data->pctype, fd_data->fd_id);
309 else
310 dev_info(&pf->pdev->dev,
311 "Filter deleted for PCTYPE %d loc = %d\n",
312 fd_data->pctype, fd_data->fd_id);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000313 }
314
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000315 return err ? -EOPNOTSUPP : 0;
316}
317
318/**
319 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
320 * a specific flow spec
321 * @vsi: pointer to the targeted VSI
322 * @fd_data: the flow director data required for the FDir descriptor
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000323 * @add: true adds a filter, false removes it
324 *
Jean Sacren21d3efd2014-03-17 18:14:39 +0000325 * Always returns -EOPNOTSUPP
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000326 **/
327static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
328 struct i40e_fdir_filter *fd_data,
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000329 bool add)
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000330{
331 return -EOPNOTSUPP;
332}
333
334#define I40E_IP_DUMMY_PACKET_LEN 34
335/**
336 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
337 * a specific flow spec
338 * @vsi: pointer to the targeted VSI
339 * @fd_data: the flow director data required for the FDir descriptor
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000340 * @add: true adds a filter, false removes it
341 *
342 * Returns 0 if the filters were successfully added or removed
343 **/
344static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
345 struct i40e_fdir_filter *fd_data,
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000346 bool add)
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000347{
348 struct i40e_pf *pf = vsi->back;
349 struct iphdr *ip;
350 bool err = false;
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000351 u8 *raw_packet;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000352 int ret;
353 int i;
354 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
355 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
356 0, 0, 0, 0};
357
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000358 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
359 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000360 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
361 if (!raw_packet)
362 return -ENOMEM;
363 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
364 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
365
366 ip->saddr = fd_data->src_ip[0];
367 ip->daddr = fd_data->dst_ip[0];
368 ip->protocol = 0;
369
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000370 fd_data->pctype = i;
371 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
372
373 if (ret) {
374 dev_info(&pf->pdev->dev,
Carolyn Wybornye99bdd32014-07-09 07:46:12 +0000375 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
376 fd_data->pctype, fd_data->fd_id, ret);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000377 err = true;
Anjali Singhai Jain4205d372015-02-27 09:15:27 +0000378 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
Anjali Singhai Jainf7233c52014-07-09 07:46:16 +0000379 if (add)
380 dev_info(&pf->pdev->dev,
381 "Filter OK for PCTYPE %d loc = %d\n",
382 fd_data->pctype, fd_data->fd_id);
383 else
384 dev_info(&pf->pdev->dev,
385 "Filter deleted for PCTYPE %d loc = %d\n",
386 fd_data->pctype, fd_data->fd_id);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000387 }
388 }
389
390 return err ? -EOPNOTSUPP : 0;
391}
392
393/**
394 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
395 * @vsi: pointer to the targeted VSI
 396 * @input: flow director filter data to add or delete
397 * @add: true adds a filter, false removes it
398 *
399 **/
400int i40e_add_del_fdir(struct i40e_vsi *vsi,
401 struct i40e_fdir_filter *input, bool add)
402{
403 struct i40e_pf *pf = vsi->back;
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000404 int ret;
405
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000406 switch (input->flow_type & ~FLOW_EXT) {
407 case TCP_V4_FLOW:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000408 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000409 break;
410 case UDP_V4_FLOW:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000411 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000412 break;
413 case SCTP_V4_FLOW:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000414 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000415 break;
416 case IPV4_FLOW:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000417 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000418 break;
419 case IP_USER_FLOW:
420 switch (input->ip4_proto) {
421 case IPPROTO_TCP:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000422 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000423 break;
424 case IPPROTO_UDP:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000425 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000426 break;
427 case IPPROTO_SCTP:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000428 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000429 break;
430 default:
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000431 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000432 break;
433 }
434 break;
435 default:
Jakub Kicinskic5ffe7e2014-04-02 10:33:22 +0000436 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000437 input->flow_type);
438 ret = -EINVAL;
439 }
440
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000441 /* The buffer allocated here is freed by i40e_clean_tx_ring() */
Joseph Gasparakis17a73f62014-02-12 01:45:30 +0000442 return ret;
443}
444
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000445/**
446 * i40e_fd_handle_status - check the Programming Status for FD
447 * @rx_ring: the Rx ring for this descriptor
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000448 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000449 * @prog_id: the id originally used for programming
450 *
451 * This is used to verify if the FD programming or invalidation
452 * requested by SW to the HW is successful or not and take actions accordingly.
453 **/
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000454static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455 union i40e_rx_desc *rx_desc, u8 prog_id)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000456{
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000457 struct i40e_pf *pf = rx_ring->vsi->back;
458 struct pci_dev *pdev = pf->pdev;
459 u32 fcnt_prog, fcnt_avail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000460 u32 error;
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000461 u64 qw;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000462
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000463 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000464 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
465 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
466
Jesse Brandeburg41a1d042015-06-04 16:24:02 -0400467 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
Anjali Singhai Jainf7233c52014-07-09 07:46:16 +0000468 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
469 (I40E_DEBUG_FD & pf->hw.debug_mask))
470 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
471 rx_desc->wb.qword0.hi_dword.fd_id);
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000472
Anjali Singhai Jain04294e32015-02-27 09:15:28 +0000473 /* Check if the programming error is for ATR.
474 * If so, auto disable ATR and set a state for
475 * flush in progress. Next time we come here if flush is in
476 * progress do nothing, once flush is complete the state will
477 * be cleared.
478 */
479 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
480 return;
481
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +0000482 pf->fd_add_err++;
483 /* store the current atr filter count */
484 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
485
Anjali Singhai Jain04294e32015-02-27 09:15:28 +0000486 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
487 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
488 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
489 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
490 }
491
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000492 /* filter programming failed most likely due to table full */
Anjali Singhai Jain04294e32015-02-27 09:15:28 +0000493 fcnt_prog = i40e_get_global_fd_count(pf);
Anjali Singhai Jain12957382014-06-04 04:22:47 +0000494 fcnt_avail = pf->fdir_pf_filter_count;
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000495 /* If ATR is running fcnt_prog can quickly change,
496 * if we are very close to full, it makes sense to disable
497 * FD ATR/SB and then re-enable it when there is room.
498 */
499 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
Anjali Singhai Jain1e1be8f2014-07-10 08:03:26 +0000500 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
Anjali Singhai Jainb814ba62014-06-04 20:41:48 +0000501 !(pf->auto_disable_flags &
Anjali Singhai Jainb814ba62014-06-04 20:41:48 +0000502 I40E_FLAG_FD_SB_ENABLED)) {
Anjali Singhai Jain2e4875e2015-04-16 20:06:06 -0400503 if (I40E_DEBUG_FD & pf->hw.debug_mask)
504 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000505 pf->auto_disable_flags |=
506 I40E_FLAG_FD_SB_ENABLED;
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000507 }
508 } else {
Carolyn Wybornye99bdd32014-07-09 07:46:12 +0000509 dev_info(&pdev->dev,
Anjali Singhai Jainf7233c52014-07-09 07:46:16 +0000510 "FD filter programming failed due to incorrect filter parameters\n");
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000511 }
Jesse Brandeburg41a1d042015-06-04 16:24:02 -0400512 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
Anjali Singhai Jain13c28842014-03-06 09:00:04 +0000513 if (I40E_DEBUG_FD & pf->hw.debug_mask)
Carolyn Wybornye99bdd32014-07-09 07:46:12 +0000514 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
Anjali Singhai Jain13c28842014-03-06 09:00:04 +0000515 rx_desc->wb.qword0.hi_dword.fd_id);
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000516 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000517}
518
519/**
Alexander Duycka5e9c572013-09-28 06:00:27 +0000520 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000521 * @ring: the ring that owns the buffer
522 * @tx_buffer: the buffer to free
523 **/
Alexander Duycka5e9c572013-09-28 06:00:27 +0000524static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
525 struct i40e_tx_buffer *tx_buffer)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000526{
Alexander Duycka5e9c572013-09-28 06:00:27 +0000527 if (tx_buffer->skb) {
Anjali Singhai Jain49d7d932014-06-04 08:45:15 +0000528 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
529 kfree(tx_buffer->raw_buf);
530 else
531 dev_kfree_skb_any(tx_buffer->skb);
532
Alexander Duycka5e9c572013-09-28 06:00:27 +0000533 if (dma_unmap_len(tx_buffer, len))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000534 dma_unmap_single(ring->dev,
Alexander Duyck35a1e2a2013-09-28 06:00:17 +0000535 dma_unmap_addr(tx_buffer, dma),
536 dma_unmap_len(tx_buffer, len),
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000537 DMA_TO_DEVICE);
Alexander Duycka5e9c572013-09-28 06:00:27 +0000538 } else if (dma_unmap_len(tx_buffer, len)) {
539 dma_unmap_page(ring->dev,
540 dma_unmap_addr(tx_buffer, dma),
541 dma_unmap_len(tx_buffer, len),
542 DMA_TO_DEVICE);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000543 }
Alexander Duycka5e9c572013-09-28 06:00:27 +0000544 tx_buffer->next_to_watch = NULL;
545 tx_buffer->skb = NULL;
Alexander Duyck35a1e2a2013-09-28 06:00:17 +0000546 dma_unmap_len_set(tx_buffer, len, 0);
Alexander Duycka5e9c572013-09-28 06:00:27 +0000547 /* tx_buffer must be completely set up in the transmit path */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000548}
549
550/**
551 * i40e_clean_tx_ring - Free any empty Tx buffers
552 * @tx_ring: ring to be cleaned
553 **/
554void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
555{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000556 unsigned long bi_size;
557 u16 i;
558
559 /* ring already cleared, nothing to do */
560 if (!tx_ring->tx_bi)
561 return;
562
563 /* Free all the Tx ring sk_buffs */
Alexander Duycka5e9c572013-09-28 06:00:27 +0000564 for (i = 0; i < tx_ring->count; i++)
565 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000566
567 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
568 memset(tx_ring->tx_bi, 0, bi_size);
569
570 /* Zero out the descriptor ring */
571 memset(tx_ring->desc, 0, tx_ring->size);
572
573 tx_ring->next_to_use = 0;
574 tx_ring->next_to_clean = 0;
Alexander Duyck7070ce02013-09-28 06:00:37 +0000575
576 if (!tx_ring->netdev)
577 return;
578
579 /* cleanup Tx queue statistics */
580 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
581 tx_ring->queue_index));
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000582}
583
584/**
585 * i40e_free_tx_resources - Free Tx resources per queue
586 * @tx_ring: Tx descriptor ring for a specific queue
587 *
588 * Free all transmit software resources
589 **/
590void i40e_free_tx_resources(struct i40e_ring *tx_ring)
591{
592 i40e_clean_tx_ring(tx_ring);
593 kfree(tx_ring->tx_bi);
594 tx_ring->tx_bi = NULL;
595
596 if (tx_ring->desc) {
597 dma_free_coherent(tx_ring->dev, tx_ring->size,
598 tx_ring->desc, tx_ring->dma);
599 tx_ring->desc = NULL;
600 }
601}
602
Jesse Brandeburga68de582015-02-24 05:26:03 +0000603/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000604 * i40e_get_tx_pending - how many tx descriptors not processed
 605 * @ring: the ring of descriptors
606 *
607 * Since there is no access to the ring head register
608 * in XL710, we need to use our local copies
609 **/
Kiran Patilb03a8c12015-09-24 18:13:15 -0400610u32 i40e_get_tx_pending(struct i40e_ring *ring)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000611{
Jesse Brandeburga68de582015-02-24 05:26:03 +0000612 u32 head, tail;
613
614 head = i40e_get_head(ring);
615 tail = readl(ring->tail);
616
617 if (head != tail)
618 return (head < tail) ?
619 tail - head : (tail + ring->count - head);
620
621 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000622}
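/* Worked example of the wrap-around arithmetic above (illustrative):
 * on a 512-descriptor ring, head = 500 and tail = 10 gives
 * 10 + 512 - 500 = 22 descriptors still pending, while head = 5 and
 * tail = 10 gives the simple difference, 5.
 */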
623
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000624#define WB_STRIDE 0x3
625
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +0000626/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000627 * i40e_clean_tx_irq - Reclaim resources after transmit completes
628 * @tx_ring: tx ring to clean
629 * @budget: how many cleans we're allowed
630 *
631 * Returns true if there's any budget left (e.g. the clean is finished)
632 **/
633static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
634{
635 u16 i = tx_ring->next_to_clean;
636 struct i40e_tx_buffer *tx_buf;
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +0000637 struct i40e_tx_desc *tx_head;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000638 struct i40e_tx_desc *tx_desc;
639 unsigned int total_packets = 0;
640 unsigned int total_bytes = 0;
641
642 tx_buf = &tx_ring->tx_bi[i];
643 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +0000644 i -= tx_ring->count;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000645
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +0000646 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
647
Alexander Duycka5e9c572013-09-28 06:00:27 +0000648 do {
649 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000650
651 /* if next_to_watch is not set then there is no work pending */
652 if (!eop_desc)
653 break;
654
Alexander Duycka5e9c572013-09-28 06:00:27 +0000655 /* prevent any other reads prior to eop_desc */
656 read_barrier_depends();
657
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +0000658 /* we have caught up to head, no work left to do */
659 if (tx_head == tx_desc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000660 break;
661
Alexander Duyckc304fda2013-09-28 06:00:12 +0000662 /* clear next_to_watch to prevent false hangs */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000663 tx_buf->next_to_watch = NULL;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000664
Alexander Duycka5e9c572013-09-28 06:00:27 +0000665 /* update the statistics for this packet */
666 total_bytes += tx_buf->bytecount;
667 total_packets += tx_buf->gso_segs;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000668
Alexander Duycka5e9c572013-09-28 06:00:27 +0000669 /* free the skb */
Rick Jonesa81fb042014-09-17 03:56:20 +0000670 dev_consume_skb_any(tx_buf->skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000671
Alexander Duycka5e9c572013-09-28 06:00:27 +0000672 /* unmap skb header data */
673 dma_unmap_single(tx_ring->dev,
674 dma_unmap_addr(tx_buf, dma),
675 dma_unmap_len(tx_buf, len),
676 DMA_TO_DEVICE);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000677
Alexander Duycka5e9c572013-09-28 06:00:27 +0000678 /* clear tx_buffer data */
679 tx_buf->skb = NULL;
680 dma_unmap_len_set(tx_buf, len, 0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000681
Alexander Duycka5e9c572013-09-28 06:00:27 +0000682 /* unmap remaining buffers */
683 while (tx_desc != eop_desc) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000684
685 tx_buf++;
686 tx_desc++;
687 i++;
Alexander Duycka5e9c572013-09-28 06:00:27 +0000688 if (unlikely(!i)) {
689 i -= tx_ring->count;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000690 tx_buf = tx_ring->tx_bi;
691 tx_desc = I40E_TX_DESC(tx_ring, 0);
692 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000693
Alexander Duycka5e9c572013-09-28 06:00:27 +0000694 /* unmap any remaining paged data */
695 if (dma_unmap_len(tx_buf, len)) {
696 dma_unmap_page(tx_ring->dev,
697 dma_unmap_addr(tx_buf, dma),
698 dma_unmap_len(tx_buf, len),
699 DMA_TO_DEVICE);
700 dma_unmap_len_set(tx_buf, len, 0);
701 }
702 }
703
704 /* move us one more past the eop_desc for start of next pkt */
705 tx_buf++;
706 tx_desc++;
707 i++;
708 if (unlikely(!i)) {
709 i -= tx_ring->count;
710 tx_buf = tx_ring->tx_bi;
711 tx_desc = I40E_TX_DESC(tx_ring, 0);
712 }
713
Jesse Brandeburg016890b2015-02-27 09:15:31 +0000714 prefetch(tx_desc);
715
Alexander Duycka5e9c572013-09-28 06:00:27 +0000716 /* update budget accounting */
717 budget--;
718 } while (likely(budget));
719
720 i += tx_ring->count;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000721 tx_ring->next_to_clean = i;
Alexander Duyck980e9b12013-09-28 06:01:03 +0000722 u64_stats_update_begin(&tx_ring->syncp);
Alexander Duycka114d0a2013-09-28 06:00:43 +0000723 tx_ring->stats.bytes += total_bytes;
724 tx_ring->stats.packets += total_packets;
Alexander Duyck980e9b12013-09-28 06:01:03 +0000725 u64_stats_update_end(&tx_ring->syncp);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000726 tx_ring->q_vector->tx.total_bytes += total_bytes;
727 tx_ring->q_vector->tx.total_packets += total_packets;
Alexander Duycka5e9c572013-09-28 06:00:27 +0000728
Anjali Singhai58044742015-09-25 18:26:13 -0700729 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
730 unsigned int j = 0;
731
732 /* check to see if there are < 4 descriptors
733 * waiting to be written back, then kick the hardware to force
734 * them to be written back in case we stay in NAPI.
735 * In this mode on X722 we do not enable Interrupt.
736 */
737 j = i40e_get_tx_pending(tx_ring);
738
739 if (budget &&
740 ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
741 !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
742 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
743 tx_ring->arm_wb = true;
744 }
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000745
Alexander Duyck7070ce02013-09-28 06:00:37 +0000746 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
747 tx_ring->queue_index),
748 total_packets, total_bytes);
749
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000750#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
751 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
752 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
753 /* Make sure that anybody stopping the queue after this
754 * sees the new next_to_clean.
755 */
756 smp_mb();
757 if (__netif_subqueue_stopped(tx_ring->netdev,
758 tx_ring->queue_index) &&
759 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
760 netif_wake_subqueue(tx_ring->netdev,
761 tx_ring->queue_index);
762 ++tx_ring->tx_stats.restart_queue;
763 }
764 }
765
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000766 return !!budget;
767}
768
769/**
770 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
771 * @vsi: the VSI we care about
772 * @q_vector: the vector on which to force writeback
773 *
774 **/
Kiran Patilb03a8c12015-09-24 18:13:15 -0400775void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000776{
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -0400777 u16 flags = q_vector->tx.ring[0].flags;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000778
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -0400779 if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
780 u32 val;
781
782 if (q_vector->arm_wb_state)
783 return;
784
785 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
786
787 wr32(&vsi->back->hw,
788 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
789 vsi->base_vector - 1),
790 val);
791 q_vector->arm_wb_state = true;
792 } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
793 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
794 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
795 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
796 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
797 /* allow 00 to be written to the index */
798
799 wr32(&vsi->back->hw,
800 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
801 vsi->base_vector - 1), val);
802 } else {
803 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
804 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
805 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
806 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
807 /* allow 00 to be written to the index */
808
809 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
810 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000811}
812
813/**
814 * i40e_set_new_dynamic_itr - Find new ITR level
815 * @rc: structure containing ring performance data
816 *
817 * Stores a new ITR value based on packets and byte counts during
818 * the last interrupt. The advantage of per interrupt computation
819 * is faster updates and more accurate ITR for the current traffic
820 * pattern. Constants in this function were computed based on
821 * theoretical maximum wire speed and thresholds were set based on
822 * testing data as well as attempting to minimize response time
823 * while increasing bulk throughput.
824 **/
825static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
826{
827 enum i40e_latency_range new_latency_range = rc->latency_range;
828 u32 new_itr = rc->itr;
829 int bytes_per_int;
830
831 if (rc->total_packets == 0 || !rc->itr)
832 return;
833
834 /* simple throttlerate management
835 * 0-10MB/s lowest (100000 ints/s)
836 * 10-20MB/s low (20000 ints/s)
837 * 20-1249MB/s bulk (8000 ints/s)
838 */
839 bytes_per_int = rc->total_bytes / rc->itr;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400840 switch (new_latency_range) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000841 case I40E_LOWEST_LATENCY:
842 if (bytes_per_int > 10)
843 new_latency_range = I40E_LOW_LATENCY;
844 break;
845 case I40E_LOW_LATENCY:
846 if (bytes_per_int > 20)
847 new_latency_range = I40E_BULK_LATENCY;
848 else if (bytes_per_int <= 10)
849 new_latency_range = I40E_LOWEST_LATENCY;
850 break;
851 case I40E_BULK_LATENCY:
852 if (bytes_per_int <= 20)
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400853 new_latency_range = I40E_LOW_LATENCY;
854 break;
855 default:
856 if (bytes_per_int <= 20)
857 new_latency_range = I40E_LOW_LATENCY;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000858 break;
859 }
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400860 rc->latency_range = new_latency_range;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000861
862 switch (new_latency_range) {
863 case I40E_LOWEST_LATENCY:
864 new_itr = I40E_ITR_100K;
865 break;
866 case I40E_LOW_LATENCY:
867 new_itr = I40E_ITR_20K;
868 break;
869 case I40E_BULK_LATENCY:
870 new_itr = I40E_ITR_8K;
871 break;
872 default:
873 break;
874 }
875
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400876 if (new_itr != rc->itr)
877 rc->itr = new_itr;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000878
879 rc->total_bytes = 0;
880 rc->total_packets = 0;
881}
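/* Illustrative note on the hysteresis above: a ring sitting in
 * I40E_LOW_LATENCY is promoted to I40E_BULK_LATENCY (8K ints/s) only
 * once bytes_per_int exceeds 20, and demoted to I40E_LOWEST_LATENCY
 * (100K ints/s) only once it drops to 10 or below, so a flow hovering
 * between the two thresholds keeps its current 20K ints/s setting.
 */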
882
883/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000884 * i40e_clean_programming_status - clean the programming status descriptor
885 * @rx_ring: the rx ring that has this descriptor
886 * @rx_desc: the rx descriptor written back by HW
887 *
888 * Flow director should handle FD_FILTER_STATUS to check its filter programming
889 * status being successful or not and take actions accordingly. FCoE should
890 * handle its context/filter programming/invalidation status and take actions.
891 *
892 **/
893static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
894 union i40e_rx_desc *rx_desc)
895{
896 u64 qw;
897 u8 id;
898
899 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
900 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
901 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
902
903 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000904 i40e_fd_handle_status(rx_ring, rx_desc, id);
Vasu Dev38e00432014-08-01 13:27:03 -0700905#ifdef I40E_FCOE
906 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
907 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
908 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
909#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000910}
911
912/**
913 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
914 * @tx_ring: the tx ring to set up
915 *
916 * Return 0 on success, negative on error
917 **/
918int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
919{
920 struct device *dev = tx_ring->dev;
921 int bi_size;
922
923 if (!dev)
924 return -ENOMEM;
925
Jesse Brandeburge908f812015-07-23 16:54:42 -0400926 /* warn if we are about to overwrite the pointer */
927 WARN_ON(tx_ring->tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000928 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
929 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
930 if (!tx_ring->tx_bi)
931 goto err;
932
933 /* round up to nearest 4K */
934 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +0000935 /* add u32 for head writeback, align after this takes care of
936 * guaranteeing this is at least one cache line in size
937 */
938 tx_ring->size += sizeof(u32);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000939 tx_ring->size = ALIGN(tx_ring->size, 4096);
940 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
941 &tx_ring->dma, GFP_KERNEL);
942 if (!tx_ring->desc) {
943 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
944 tx_ring->size);
945 goto err;
946 }
947
948 tx_ring->next_to_use = 0;
949 tx_ring->next_to_clean = 0;
950 return 0;
951
952err:
953 kfree(tx_ring->tx_bi);
954 tx_ring->tx_bi = NULL;
955 return -ENOMEM;
956}
957
958/**
959 * i40e_clean_rx_ring - Free Rx buffers
960 * @rx_ring: ring to be cleaned
961 **/
962void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
963{
964 struct device *dev = rx_ring->dev;
965 struct i40e_rx_buffer *rx_bi;
966 unsigned long bi_size;
967 u16 i;
968
969 /* ring already cleared, nothing to do */
970 if (!rx_ring->rx_bi)
971 return;
972
Mitch Williamsa132af22015-01-24 09:58:35 +0000973 if (ring_is_ps_enabled(rx_ring)) {
974 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
975
976 rx_bi = &rx_ring->rx_bi[0];
977 if (rx_bi->hdr_buf) {
978 dma_free_coherent(dev,
979 bufsz,
980 rx_bi->hdr_buf,
981 rx_bi->dma);
982 for (i = 0; i < rx_ring->count; i++) {
983 rx_bi = &rx_ring->rx_bi[i];
984 rx_bi->dma = 0;
Shannon Nelson37a29732015-02-27 09:15:19 +0000985 rx_bi->hdr_buf = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +0000986 }
987 }
988 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000989 /* Free all the Rx ring sk_buffs */
990 for (i = 0; i < rx_ring->count; i++) {
991 rx_bi = &rx_ring->rx_bi[i];
992 if (rx_bi->dma) {
993 dma_unmap_single(dev,
994 rx_bi->dma,
995 rx_ring->rx_buf_len,
996 DMA_FROM_DEVICE);
997 rx_bi->dma = 0;
998 }
999 if (rx_bi->skb) {
1000 dev_kfree_skb(rx_bi->skb);
1001 rx_bi->skb = NULL;
1002 }
1003 if (rx_bi->page) {
1004 if (rx_bi->page_dma) {
1005 dma_unmap_page(dev,
1006 rx_bi->page_dma,
1007 PAGE_SIZE / 2,
1008 DMA_FROM_DEVICE);
1009 rx_bi->page_dma = 0;
1010 }
1011 __free_page(rx_bi->page);
1012 rx_bi->page = NULL;
1013 rx_bi->page_offset = 0;
1014 }
1015 }
1016
1017 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1018 memset(rx_ring->rx_bi, 0, bi_size);
1019
1020 /* Zero out the descriptor ring */
1021 memset(rx_ring->desc, 0, rx_ring->size);
1022
1023 rx_ring->next_to_clean = 0;
1024 rx_ring->next_to_use = 0;
1025}
1026
1027/**
1028 * i40e_free_rx_resources - Free Rx resources
1029 * @rx_ring: ring to clean the resources from
1030 *
1031 * Free all receive software resources
1032 **/
1033void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1034{
1035 i40e_clean_rx_ring(rx_ring);
1036 kfree(rx_ring->rx_bi);
1037 rx_ring->rx_bi = NULL;
1038
1039 if (rx_ring->desc) {
1040 dma_free_coherent(rx_ring->dev, rx_ring->size,
1041 rx_ring->desc, rx_ring->dma);
1042 rx_ring->desc = NULL;
1043 }
1044}
1045
1046/**
Mitch Williamsa132af22015-01-24 09:58:35 +00001047 * i40e_alloc_rx_headers - allocate rx header buffers
1048 * @rx_ring: ring to alloc buffers
1049 *
1050 * Allocate rx header buffers for the entire ring. As these are static,
1051 * this is only called when setting up a new ring.
1052 **/
1053void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1054{
1055 struct device *dev = rx_ring->dev;
1056 struct i40e_rx_buffer *rx_bi;
1057 dma_addr_t dma;
1058 void *buffer;
1059 int buf_size;
1060 int i;
1061
1062 if (rx_ring->rx_bi[0].hdr_buf)
1063 return;
1064 /* Make sure the buffers don't cross cache line boundaries. */
1065 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1066 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1067 &dma, GFP_KERNEL);
1068 if (!buffer)
1069 return;
1070 for (i = 0; i < rx_ring->count; i++) {
1071 rx_bi = &rx_ring->rx_bi[i];
1072 rx_bi->dma = dma + (i * buf_size);
1073 rx_bi->hdr_buf = buffer + (i * buf_size);
1074 }
1075}
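/* Layout sketch for the allocation above (illustrative): a single
 * coherent block of count * ALIGN(rx_hdr_len, 256) bytes is carved
 * into per-descriptor slices, so descriptor i's header buffer lives at
 * CPU address buffer + i * buf_size and DMA address dma + i * buf_size.
 */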
1076
1077/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001078 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1079 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1080 *
1081 * Returns 0 on success, negative on failure
1082 **/
1083int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1084{
1085 struct device *dev = rx_ring->dev;
1086 int bi_size;
1087
Jesse Brandeburge908f812015-07-23 16:54:42 -04001088 /* warn if we are about to overwrite the pointer */
1089 WARN_ON(rx_ring->rx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001090 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1091 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1092 if (!rx_ring->rx_bi)
1093 goto err;
1094
Carolyn Wybornyf217d6c2015-02-09 17:42:31 -08001095 u64_stats_init(&rx_ring->syncp);
Carolyn Wyborny638702b2015-01-24 09:58:32 +00001096
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001097 /* Round up to nearest 4K */
1098 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1099 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1100 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1101 rx_ring->size = ALIGN(rx_ring->size, 4096);
1102 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1103 &rx_ring->dma, GFP_KERNEL);
1104
1105 if (!rx_ring->desc) {
1106 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1107 rx_ring->size);
1108 goto err;
1109 }
1110
1111 rx_ring->next_to_clean = 0;
1112 rx_ring->next_to_use = 0;
1113
1114 return 0;
1115err:
1116 kfree(rx_ring->rx_bi);
1117 rx_ring->rx_bi = NULL;
1118 return -ENOMEM;
1119}
1120
1121/**
1122 * i40e_release_rx_desc - Store the new tail and head values
1123 * @rx_ring: ring to bump
1124 * @val: new head index
1125 **/
1126static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1127{
1128 rx_ring->next_to_use = val;
1129 /* Force memory writes to complete before letting h/w
1130 * know there are new descriptors to fetch. (Only
1131 * applicable for weak-ordered memory model archs,
1132 * such as IA-64).
1133 */
1134 wmb();
1135 writel(val, rx_ring->tail);
1136}
1137
1138/**
Mitch Williamsa132af22015-01-24 09:58:35 +00001139 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001140 * @rx_ring: ring to place buffers on
1141 * @cleaned_count: number of buffers to replace
1142 **/
Mitch Williamsa132af22015-01-24 09:58:35 +00001143void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1144{
1145 u16 i = rx_ring->next_to_use;
1146 union i40e_rx_desc *rx_desc;
1147 struct i40e_rx_buffer *bi;
1148
1149 /* do nothing if no valid netdev defined */
1150 if (!rx_ring->netdev || !cleaned_count)
1151 return;
1152
1153 while (cleaned_count--) {
1154 rx_desc = I40E_RX_DESC(rx_ring, i);
1155 bi = &rx_ring->rx_bi[i];
1156
1157 if (bi->skb) /* desc is in use */
1158 goto no_buffers;
1159 if (!bi->page) {
1160 bi->page = alloc_page(GFP_ATOMIC);
1161 if (!bi->page) {
1162 rx_ring->rx_stats.alloc_page_failed++;
1163 goto no_buffers;
1164 }
1165 }
1166
1167 if (!bi->page_dma) {
1168 /* use a half page if we're re-using */
1169 bi->page_offset ^= PAGE_SIZE / 2;
1170 bi->page_dma = dma_map_page(rx_ring->dev,
1171 bi->page,
1172 bi->page_offset,
1173 PAGE_SIZE / 2,
1174 DMA_FROM_DEVICE);
1175 if (dma_mapping_error(rx_ring->dev,
1176 bi->page_dma)) {
1177 rx_ring->rx_stats.alloc_page_failed++;
1178 bi->page_dma = 0;
1179 goto no_buffers;
1180 }
1181 }
1182
1183 dma_sync_single_range_for_device(rx_ring->dev,
1184 bi->dma,
1185 0,
1186 rx_ring->rx_hdr_len,
1187 DMA_FROM_DEVICE);
1188 /* Refresh the desc even if buffer_addrs didn't change
1189 * because each write-back erases this info.
1190 */
1191 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1192 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1193 i++;
1194 if (i == rx_ring->count)
1195 i = 0;
1196 }
1197
1198no_buffers:
1199 if (rx_ring->next_to_use != i)
1200 i40e_release_rx_desc(rx_ring, i);
1201}
1202
1203/**
1204 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1205 * @rx_ring: ring to place buffers on
1206 * @cleaned_count: number of buffers to replace
1207 **/
1208void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001209{
1210 u16 i = rx_ring->next_to_use;
1211 union i40e_rx_desc *rx_desc;
1212 struct i40e_rx_buffer *bi;
1213 struct sk_buff *skb;
1214
1215 /* do nothing if no valid netdev defined */
1216 if (!rx_ring->netdev || !cleaned_count)
1217 return;
1218
1219 while (cleaned_count--) {
1220 rx_desc = I40E_RX_DESC(rx_ring, i);
1221 bi = &rx_ring->rx_bi[i];
1222 skb = bi->skb;
1223
1224 if (!skb) {
1225 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1226 rx_ring->rx_buf_len);
1227 if (!skb) {
Mitch Williams420136c2013-12-18 13:45:59 +00001228 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001229 goto no_buffers;
1230 }
1231 /* initialize queue mapping */
1232 skb_record_rx_queue(skb, rx_ring->queue_index);
1233 bi->skb = skb;
1234 }
1235
1236 if (!bi->dma) {
1237 bi->dma = dma_map_single(rx_ring->dev,
1238 skb->data,
1239 rx_ring->rx_buf_len,
1240 DMA_FROM_DEVICE);
1241 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
Mitch Williams420136c2013-12-18 13:45:59 +00001242 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001243 bi->dma = 0;
1244 goto no_buffers;
1245 }
1246 }
1247
Mitch Williamsa132af22015-01-24 09:58:35 +00001248 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1249 rx_desc->read.hdr_addr = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001250 i++;
1251 if (i == rx_ring->count)
1252 i = 0;
1253 }
1254
1255no_buffers:
1256 if (rx_ring->next_to_use != i)
1257 i40e_release_rx_desc(rx_ring, i);
1258}
1259
1260/**
1261 * i40e_receive_skb - Send a completed packet up the stack
1262 * @rx_ring: rx ring in play
1263 * @skb: packet to send up
1264 * @vlan_tag: vlan tag for packet
1265 **/
1266static void i40e_receive_skb(struct i40e_ring *rx_ring,
1267 struct sk_buff *skb, u16 vlan_tag)
1268{
1269 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1270 struct i40e_vsi *vsi = rx_ring->vsi;
1271 u64 flags = vsi->back->flags;
1272
1273 if (vlan_tag & VLAN_VID_MASK)
1274 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1275
1276 if (flags & I40E_FLAG_IN_NETPOLL)
1277 netif_rx(skb);
1278 else
1279 napi_gro_receive(&q_vector->napi, skb);
1280}
1281
1282/**
1283 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1284 * @vsi: the VSI we care about
1285 * @skb: skb currently being received and modified
1286 * @rx_status: status value of last descriptor in packet
1287 * @rx_error: error value of last descriptor in packet
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001288 * @rx_ptype: ptype value of last descriptor in packet
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001289 **/
1290static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1291 struct sk_buff *skb,
1292 u32 rx_status,
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001293 u32 rx_error,
1294 u16 rx_ptype)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001295{
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001296 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1297 bool ipv4 = false, ipv6 = false;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001298 bool ipv4_tunnel, ipv6_tunnel;
1299 __wsum rx_udp_csum;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001300 struct iphdr *iph;
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001301 __sum16 csum;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001302
Anjali Singhai Jainf8faaa42015-02-24 06:58:48 +00001303 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1304 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1305 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1306 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001307
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001308 skb->ip_summed = CHECKSUM_NONE;
1309
1310 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001311 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001312 return;
1313
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001314 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001315 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001316 return;
1317
1318 /* both known and outer_ip must be set for the below code to work */
1319 if (!(decoded.known && decoded.outer_ip))
1320 return;
1321
1322 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1323 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1324 ipv4 = true;
1325 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1326 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1327 ipv6 = true;
1328
1329 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001330 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1331 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001332 goto checksum_fail;
1333
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001334 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001335 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001336 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001337 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001338 return;
1339
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001340 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001341 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001342 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001343
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001344 /* handle packets that were not able to be checksummed due
1345	 * to arrival speed; in this case the stack can compute
1346 * the csum.
1347 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001348 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001349 return;
1350
1351 /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1352	 * it in the driver; the hardware does not do it for us.
1353	 * Since the L3L4P bit was set we assume a valid IHL value (>=5),
1354	 * so the total length of the IPv4 header is IHL*4 bytes.
1355	 * The UDP_0 bit *may* be set if the *inner* header is UDP
1356 */
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04001357 if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1358 (ipv4_tunnel)) {
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001359 skb->transport_header = skb->mac_header +
1360 sizeof(struct ethhdr) +
1361 (ip_hdr(skb)->ihl * 4);
1362
1363 /* Add 4 bytes for VLAN tagged packets */
1364 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1365 skb->protocol == htons(ETH_P_8021AD))
1366 ? VLAN_HLEN : 0;
1367
Anjali Singhaif6385972014-12-19 02:58:11 +00001368 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1369 (udp_hdr(skb)->check != 0)) {
1370 rx_udp_csum = udp_csum(skb);
1371 iph = ip_hdr(skb);
1372 csum = csum_tcpudp_magic(
1373 iph->saddr, iph->daddr,
1374 (skb->len - skb_transport_offset(skb)),
1375 IPPROTO_UDP, rx_udp_csum);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001376
Anjali Singhaif6385972014-12-19 02:58:11 +00001377 if (udp_hdr(skb)->check != csum)
1378 goto checksum_fail;
1379
1380		} /* else it's GRE and so no outer UDP header */
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001381 }
1382
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001383 skb->ip_summed = CHECKSUM_UNNECESSARY;
Tom Herbertfa4ba692014-08-27 21:27:32 -07001384 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001385
1386 return;
1387
1388checksum_fail:
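	/* the skb is still handed up the stack with ip_summed left at
	 * CHECKSUM_NONE, so the stack will verify the checksum itself
	 */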
1389 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001390}
1391
1392/**
1393 * i40e_rx_hash - returns the hash value from the Rx descriptor
1394 * @ring: descriptor ring
1395 * @rx_desc: specific descriptor
1396 **/
1397static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1398 union i40e_rx_desc *rx_desc)
1399{
Jesse Brandeburg8a494922013-11-20 10:02:49 +00001400 const __le64 rss_mask =
1401 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1402 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1403
1404 if ((ring->netdev->features & NETIF_F_RXHASH) &&
1405 (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1406 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1407 else
1408 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001409}
1410
1411/**
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001412 * i40e_ptype_to_hash - get a hash type
1413 * @ptype: the ptype value from the descriptor
1414 *
1415 * Returns a hash type to be used by skb_set_hash
1416 **/
1417static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1418{
1419 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1420
1421 if (!decoded.known)
1422 return PKT_HASH_TYPE_NONE;
1423
1424 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1425 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1426 return PKT_HASH_TYPE_L4;
1427 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1428 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1429 return PKT_HASH_TYPE_L3;
1430 else
1431 return PKT_HASH_TYPE_L2;
1432}
1433
1434/**
Mitch Williamsa132af22015-01-24 09:58:35 +00001435 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001436 * @rx_ring: rx ring to clean
1437 * @budget: how many cleans we're allowed
1438 *
1439 * Returns number of packets cleaned
1440 **/
Mitch Williamsa132af22015-01-24 09:58:35 +00001441static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001442{
1443 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1444 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1445 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jiang Liu8dc55622015-08-17 11:19:02 +08001446 const int current_node = numa_mem_id();
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001447 struct i40e_vsi *vsi = rx_ring->vsi;
1448 u16 i = rx_ring->next_to_clean;
1449 union i40e_rx_desc *rx_desc;
1450 u32 rx_error, rx_status;
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001451 u8 rx_ptype;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001452 u64 qword;
1453
Eric W. Biederman390f86d2014-03-14 17:59:10 -07001454 if (budget <= 0)
1455 return 0;
1456
Mitch Williamsa132af22015-01-24 09:58:35 +00001457 do {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001458 struct i40e_rx_buffer *rx_bi;
1459 struct sk_buff *skb;
1460 u16 vlan_tag;
Mitch Williamsa132af22015-01-24 09:58:35 +00001461 /* return some buffers to hardware, one at a time is too slow */
1462 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1463 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1464 cleaned_count = 0;
1465 }
1466
1467 i = rx_ring->next_to_clean;
1468 rx_desc = I40E_RX_DESC(rx_ring, i);
1469 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1470 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1471 I40E_RXD_QW1_STATUS_SHIFT;
1472
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001473 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001474 break;
1475
1476 /* This memory barrier is needed to keep us from reading
1477 * any other fields out of the rx_desc until we know the
1478 * DD bit is set.
1479 */
Alexander Duyck67317162015-04-08 18:49:43 -07001480 dma_rmb();
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001481 if (i40e_rx_is_programming_status(qword)) {
1482 i40e_clean_programming_status(rx_ring, rx_desc);
Mitch Williamsa132af22015-01-24 09:58:35 +00001483 I40E_RX_INCREMENT(rx_ring, i);
1484 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001485 }
1486 rx_bi = &rx_ring->rx_bi[i];
1487 skb = rx_bi->skb;
Mitch Williamsa132af22015-01-24 09:58:35 +00001488 if (likely(!skb)) {
1489 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1490 rx_ring->rx_hdr_len);
Jesse Brandeburg8b6ed9c2015-03-31 00:45:01 -07001491 if (!skb) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001492 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburg8b6ed9c2015-03-31 00:45:01 -07001493 break;
1494 }
1495
Mitch Williamsa132af22015-01-24 09:58:35 +00001496 /* initialize queue mapping */
1497 skb_record_rx_queue(skb, rx_ring->queue_index);
1498 /* we are reusing so sync this buffer for CPU use */
1499 dma_sync_single_range_for_cpu(rx_ring->dev,
1500 rx_bi->dma,
1501 0,
1502 rx_ring->rx_hdr_len,
1503 DMA_FROM_DEVICE);
1504 }
Mitch Williams829af3ac2013-12-18 13:46:00 +00001505 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1506 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1507 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1508 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1509 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1510 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001511
Mitch Williams829af3ac2013-12-18 13:46:00 +00001512 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1513 I40E_RXD_QW1_ERROR_SHIFT;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001514 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1515 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001516
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001517 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1518 I40E_RXD_QW1_PTYPE_SHIFT;
Mitch Williamsa132af22015-01-24 09:58:35 +00001519 prefetch(rx_bi->page);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001520 rx_bi->skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001521 cleaned_count++;
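		/* the packet header is copied out of the header buffer when the
		 * descriptor reports a split header or a header buffer
		 * overflow; otherwise the start of the frame is pulled out of
		 * the data page
		 */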
1522 if (rx_hbo || rx_sph) {
1523 int len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001524 if (rx_hbo)
1525 len = I40E_RX_HDR_SIZE;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001526 else
Mitch Williamsa132af22015-01-24 09:58:35 +00001527 len = rx_header_len;
1528 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1529 } else if (skb->len == 0) {
1530 int len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001531
Mitch Williamsa132af22015-01-24 09:58:35 +00001532 len = (rx_packet_len > skb_headlen(skb) ?
1533 skb_headlen(skb) : rx_packet_len);
1534 memcpy(__skb_put(skb, len),
1535 rx_bi->page + rx_bi->page_offset,
1536 len);
1537 rx_bi->page_offset += len;
1538 rx_packet_len -= len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001539 }
1540
1541 /* Get the rest of the data if this was a header split */
Mitch Williamsa132af22015-01-24 09:58:35 +00001542 if (rx_packet_len) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001543 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1544 rx_bi->page,
1545 rx_bi->page_offset,
1546 rx_packet_len);
1547
1548 skb->len += rx_packet_len;
1549 skb->data_len += rx_packet_len;
1550 skb->truesize += rx_packet_len;
1551
1552 if ((page_count(rx_bi->page) == 1) &&
1553 (page_to_nid(rx_bi->page) == current_node))
1554 get_page(rx_bi->page);
1555 else
1556 rx_bi->page = NULL;
1557
1558 dma_unmap_page(rx_ring->dev,
1559 rx_bi->page_dma,
1560 PAGE_SIZE / 2,
1561 DMA_FROM_DEVICE);
1562 rx_bi->page_dma = 0;
1563 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001564 I40E_RX_INCREMENT(rx_ring, i);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001565
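		/* not the last descriptor of the packet: stash the partially
		 * built skb on the next buffer so assembly continues there
		 */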
1566 if (unlikely(
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001567 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001568 struct i40e_rx_buffer *next_buffer;
1569
1570 next_buffer = &rx_ring->rx_bi[i];
Mitch Williamsa132af22015-01-24 09:58:35 +00001571 next_buffer->skb = skb;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001572 rx_ring->rx_stats.non_eop_descs++;
Mitch Williamsa132af22015-01-24 09:58:35 +00001573 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001574 }
1575
1576 /* ERR_MASK will only have valid bits if EOP set */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001577 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001578 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001579 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001580 }
1581
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001582 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1583 i40e_ptype_to_hash(rx_ptype));
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00001584 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1585 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1586 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1587 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1588 rx_ring->last_rx_timestamp = jiffies;
1589 }
1590
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001591 /* probably a little skewed due to removing CRC */
1592 total_rx_bytes += skb->len;
1593 total_rx_packets++;
1594
1595 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001596
1597 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1598
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001599 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001600 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1601 : 0;
Vasu Dev38e00432014-08-01 13:27:03 -07001602#ifdef I40E_FCOE
1603 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1604 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001605 continue;
Vasu Dev38e00432014-08-01 13:27:03 -07001606 }
1607#endif
Mitch Williamsa132af22015-01-24 09:58:35 +00001608 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001609 i40e_receive_skb(rx_ring, skb, vlan_tag);
1610
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001611 rx_desc->wb.qword1.status_error_len = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001612
Mitch Williamsa132af22015-01-24 09:58:35 +00001613 } while (likely(total_rx_packets < budget));
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001614
Alexander Duyck980e9b12013-09-28 06:01:03 +00001615 u64_stats_update_begin(&rx_ring->syncp);
Alexander Duycka114d0a2013-09-28 06:00:43 +00001616 rx_ring->stats.packets += total_rx_packets;
1617 rx_ring->stats.bytes += total_rx_bytes;
Alexander Duyck980e9b12013-09-28 06:01:03 +00001618 u64_stats_update_end(&rx_ring->syncp);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001619 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1620 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1621
Mitch Williamsa132af22015-01-24 09:58:35 +00001622 return total_rx_packets;
1623}
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001624
Mitch Williamsa132af22015-01-24 09:58:35 +00001625/**
1626 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1627 * @rx_ring: rx ring to clean
1628 * @budget: how many cleans we're allowed
1629 *
1630 * Returns number of packets cleaned
1631 **/
1632static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1633{
1634 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1635 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1636 struct i40e_vsi *vsi = rx_ring->vsi;
1637 union i40e_rx_desc *rx_desc;
1638 u32 rx_error, rx_status;
1639 u16 rx_packet_len;
1640 u8 rx_ptype;
1641 u64 qword;
1642 u16 i;
1643
1644 do {
1645 struct i40e_rx_buffer *rx_bi;
1646 struct sk_buff *skb;
1647 u16 vlan_tag;
1648 /* return some buffers to hardware, one at a time is too slow */
1649 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1650 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1651 cleaned_count = 0;
1652 }
1653
1654 i = rx_ring->next_to_clean;
1655 rx_desc = I40E_RX_DESC(rx_ring, i);
1656 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1657 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1658 I40E_RXD_QW1_STATUS_SHIFT;
1659
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001660 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001661 break;
1662
1663 /* This memory barrier is needed to keep us from reading
1664 * any other fields out of the rx_desc until we know the
1665 * DD bit is set.
1666 */
Alexander Duyck67317162015-04-08 18:49:43 -07001667 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001668
1669 if (i40e_rx_is_programming_status(qword)) {
1670 i40e_clean_programming_status(rx_ring, rx_desc);
1671 I40E_RX_INCREMENT(rx_ring, i);
1672 continue;
1673 }
1674 rx_bi = &rx_ring->rx_bi[i];
1675 skb = rx_bi->skb;
1676 prefetch(skb->data);
1677
1678 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1679 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1680
1681 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1682 I40E_RXD_QW1_ERROR_SHIFT;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001683 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
Mitch Williamsa132af22015-01-24 09:58:35 +00001684
1685 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1686 I40E_RXD_QW1_PTYPE_SHIFT;
1687 rx_bi->skb = NULL;
1688 cleaned_count++;
1689
1690 /* Get the header and possibly the whole packet
1691		 * If this is an skb from a previous receive, dma will be 0
1692 */
1693 skb_put(skb, rx_packet_len);
1694 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1695 DMA_FROM_DEVICE);
1696 rx_bi->dma = 0;
1697
1698 I40E_RX_INCREMENT(rx_ring, i);
1699
1700 if (unlikely(
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001701 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001702 rx_ring->rx_stats.non_eop_descs++;
1703 continue;
1704 }
1705
1706 /* ERR_MASK will only have valid bits if EOP set */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001707 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001708 dev_kfree_skb_any(skb);
1709 /* TODO: shouldn't we increment a counter indicating the
1710 * drop?
1711 */
1712 continue;
1713 }
1714
1715 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1716 i40e_ptype_to_hash(rx_ptype));
1717 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1718 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1719 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1720 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1721 rx_ring->last_rx_timestamp = jiffies;
1722 }
1723
1724 /* probably a little skewed due to removing CRC */
1725 total_rx_bytes += skb->len;
1726 total_rx_packets++;
1727
1728 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1729
1730 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1731
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001732 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
Mitch Williamsa132af22015-01-24 09:58:35 +00001733 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1734 : 0;
1735#ifdef I40E_FCOE
1736 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1737 dev_kfree_skb_any(skb);
1738 continue;
1739 }
1740#endif
1741 i40e_receive_skb(rx_ring, skb, vlan_tag);
1742
Mitch Williamsa132af22015-01-24 09:58:35 +00001743 rx_desc->wb.qword1.status_error_len = 0;
1744 } while (likely(total_rx_packets < budget));
1745
1746 u64_stats_update_begin(&rx_ring->syncp);
1747 rx_ring->stats.packets += total_rx_packets;
1748 rx_ring->stats.bytes += total_rx_bytes;
1749 u64_stats_update_end(&rx_ring->syncp);
1750 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1751 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1752
1753 return total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001754}
1755
1756/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001757 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1758 * @vsi: the VSI we care about
1759 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1760 *
1761 **/
1762static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1763 struct i40e_q_vector *q_vector)
1764{
1765 struct i40e_hw *hw = &vsi->back->hw;
1766 u16 old_itr;
1767 int vector;
1768 u32 val;
1769
1770 vector = (q_vector->v_idx + vsi->base_vector);
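	/* for a dynamic ITR, write the newly computed interval together with
	 * the matching ITR index; when the interval is unchanged (or the ITR
	 * is not dynamic) the interrupt is simply re-enabled
	 */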
1771 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1772 old_itr = q_vector->rx.itr;
1773 i40e_set_new_dynamic_itr(&q_vector->rx);
1774 if (old_itr != q_vector->rx.itr) {
1775 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1776 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1777 (I40E_RX_ITR <<
1778 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1779 (q_vector->rx.itr <<
1780 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1781 } else {
1782 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1783 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1784 (I40E_ITR_NONE <<
1785 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1786 }
1787 if (!test_bit(__I40E_DOWN, &vsi->state))
1788 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
1789 } else {
Jesse Brandeburg78455482015-07-23 16:54:41 -04001790 i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001791 }
1792 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1793 old_itr = q_vector->tx.itr;
1794 i40e_set_new_dynamic_itr(&q_vector->tx);
1795 if (old_itr != q_vector->tx.itr) {
1796 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1797 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1798 (I40E_TX_ITR <<
1799 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1800 (q_vector->tx.itr <<
1801 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1802 } else {
1803 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1804 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1805 (I40E_ITR_NONE <<
1806 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1807 }
1808 if (!test_bit(__I40E_DOWN, &vsi->state))
1809 wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
1810 vsi->base_vector - 1), val);
1811 } else {
Jesse Brandeburg78455482015-07-23 16:54:41 -04001812 i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001813 }
1814}
1815
1816/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001817 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1818 * @napi: napi struct with our devices info in it
1819 * @budget: amount of work driver is allowed to do this pass, in packets
1820 *
1821 * This function will clean all queues associated with a q_vector.
1822 *
1823 * Returns the amount of work done
1824 **/
1825int i40e_napi_poll(struct napi_struct *napi, int budget)
1826{
1827 struct i40e_q_vector *q_vector =
1828 container_of(napi, struct i40e_q_vector, napi);
1829 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001830 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001831 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001832 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001833 int budget_per_ring;
Mitch Williamsa132af22015-01-24 09:58:35 +00001834 int cleaned;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001835
1836 if (test_bit(__I40E_DOWN, &vsi->state)) {
1837 napi_complete(napi);
1838 return 0;
1839 }
1840
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001841 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001842 * budget and be more aggressive about cleaning up the Tx descriptors.
1843 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001844 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001845 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001846 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001847 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001848 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001849
1850 /* We attempt to distribute budget to each Rx queue fairly, but don't
1851 * allow the budget to go below 1 because that would exit polling early.
1852 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001853 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001854
Mitch Williamsa132af22015-01-24 09:58:35 +00001855 i40e_for_each_ring(ring, q_vector->rx) {
1856 if (ring_is_ps_enabled(ring))
1857 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1858 else
1859 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1860 /* if we didn't clean as many as budgeted, we must be done */
1861 clean_complete &= (budget_per_ring != cleaned);
1862 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001863
1864 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001865 if (!clean_complete) {
1866 if (arm_wb)
1867 i40e_force_wb(vsi, q_vector);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001868 return budget;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001869 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001870
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04001871 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1872 q_vector->arm_wb_state = false;
1873
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001874 /* Work is done so exit the polling mode and re-enable the interrupt */
1875 napi_complete(napi);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001876 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1877 i40e_update_enable_itr(vsi, q_vector);
1878 } else { /* Legacy mode */
1879 struct i40e_hw *hw = &vsi->back->hw;
1880 /* We re-enable the queue 0 cause, but
1881 * don't worry about dynamic_enable
1882 * because we left it on for the other
1883 * possible interrupts during napi
1884 */
1885 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1886 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001887
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001888 wr32(hw, I40E_QINT_RQCTL(0), qval);
1889 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1890 I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1891 wr32(hw, I40E_QINT_TQCTL(0), qval);
1892 i40e_irq_dynamic_enable_icr0(vsi->back);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001893 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001894 return 0;
1895}
1896
1897/**
1898 * i40e_atr - Add a Flow Director ATR filter
1899 * @tx_ring: ring to add programming descriptor to
1900 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001901 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001902 * @protocol: wire protocol
1903 **/
1904static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001905 u32 tx_flags, __be16 protocol)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001906{
1907 struct i40e_filter_program_desc *fdir_desc;
1908 struct i40e_pf *pf = tx_ring->vsi->back;
1909 union {
1910 unsigned char *network;
1911 struct iphdr *ipv4;
1912 struct ipv6hdr *ipv6;
1913 } hdr;
1914 struct tcphdr *th;
1915 unsigned int hlen;
1916 u32 flex_ptype, dtype_cmd;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00001917 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001918
1919 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08001920 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001921 return;
1922
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00001923 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1924 return;
1925
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001926 /* if sampling is disabled do nothing */
1927 if (!tx_ring->atr_sample_rate)
1928 return;
1929
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001930 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001931 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001932
1933 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
1934 /* snag network header to get L4 type and address */
1935 hdr.network = skb_network_header(skb);
1936
1937		/* Currently only IPv4/IPv6 with TCP is supported;
1938		 * access ihl as a u8 to avoid unaligned access on ia64
1939 */
1940 if (tx_flags & I40E_TX_FLAGS_IPV4)
1941 hlen = (hdr.network[0] & 0x0F) << 2;
1942 else if (protocol == htons(ETH_P_IPV6))
1943 hlen = sizeof(struct ipv6hdr);
1944 else
1945 return;
1946 } else {
1947 hdr.network = skb_inner_network_header(skb);
1948 hlen = skb_inner_network_header_len(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001949 }
1950
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001951 /* Currently only IPv4/IPv6 with TCP is supported
1952 * Note: tx_flags gets modified to reflect inner protocols in
1953 * tx_enable_csum function if encap is enabled.
1954 */
1955 if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
1956 (hdr.ipv4->protocol != IPPROTO_TCP))
1957 return;
1958 else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
1959 (hdr.ipv6->nexthdr != IPPROTO_TCP))
1960 return;
1961
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001962 th = (struct tcphdr *)(hdr.network + hlen);
1963
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00001964 /* Due to lack of space, no more new filters can be programmed */
1965 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1966 return;
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04001967 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
1968 /* HW ATR eviction will take care of removing filters on FIN
1969 * and RST packets.
1970 */
1971 if (th->fin || th->rst)
1972 return;
1973 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00001974
1975 tx_ring->atr_count++;
1976
Anjali Singhai Jaince806782014-03-06 08:59:54 +00001977 /* sample on all syn/fin/rst packets or once every atr sample rate */
1978 if (!th->fin &&
1979 !th->syn &&
1980 !th->rst &&
1981 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001982 return;
1983
1984 tx_ring->atr_count = 0;
1985
1986 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00001987 i = tx_ring->next_to_use;
1988 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1989
1990 i++;
1991 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001992
1993 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1994 I40E_TXD_FLTR_QW0_QINDEX_MASK;
1995 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
1996 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1997 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1998 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1999 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2000
2001 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2002
2003 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2004
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002005 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002006 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2007 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2008 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2009 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2010
2011 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2012 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2013
2014 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2015 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2016
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002017 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002018 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2019 dtype_cmd |=
2020 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2021 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2022 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2023 else
2024 dtype_cmd |=
2025 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2026 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2027 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002028
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002029 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2030 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2031
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002032 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002033 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002034 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002035 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002036}
2037
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002038/**
2039 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2040 * @skb: send buffer
2041 * @tx_ring: ring to send buffer on
2042 * @flags: the tx flags to be set
2043 *
2044 * Checks the skb and set up correspondingly several generic transmit flags
2045 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2046 *
2047 * Returns error code indicate the frame should be dropped upon error and the
2048 * otherwise returns 0 to indicate the flags has been set properly.
2049 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002050#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002051inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002052 struct i40e_ring *tx_ring,
2053 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002054#else
2055static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2056 struct i40e_ring *tx_ring,
2057 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002058#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002059{
2060 __be16 protocol = skb->protocol;
2061 u32 tx_flags = 0;
2062
Greg Rose31eaacc2015-03-31 00:45:03 -07002063 if (protocol == htons(ETH_P_8021Q) &&
2064 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2065 /* When HW VLAN acceleration is turned off by the user the
2066 * stack sets the protocol to 8021q so that the driver
2067 * can take any steps required to support the SW only
2068 * VLAN handling. In our case the driver doesn't need
2069 * to take any further steps so just set the protocol
2070 * to the encapsulated ethertype.
2071 */
2072 skb->protocol = vlan_get_protocol(skb);
2073 goto out;
2074 }
2075
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002076 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002077 if (skb_vlan_tag_present(skb)) {
2078 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002079 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2080 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002081 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002082 struct vlan_hdr *vhdr, _vhdr;
2083 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2084 if (!vhdr)
2085 return -EINVAL;
2086
2087 protocol = vhdr->h_vlan_encapsulated_proto;
2088 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2089 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2090 }
2091
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002092 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2093 goto out;
2094
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002095 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002096 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2097 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002098 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2099 tx_flags |= (skb->priority & 0x7) <<
2100 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2101 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2102 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002103 int rc;
2104
2105 rc = skb_cow_head(skb, 0);
2106 if (rc < 0)
2107 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002108 vhdr = (struct vlan_ethhdr *)skb->data;
2109 vhdr->h_vlan_TCI = htons(tx_flags >>
2110 I40E_TX_FLAGS_VLAN_SHIFT);
2111 } else {
2112 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2113 }
2114 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002115
2116out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002117 *flags = tx_flags;
2118 return 0;
2119}
2120
2121/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002122 * i40e_tso - set up the tso context descriptor
2123 * @tx_ring: ptr to the ring to send
2124 * @skb: ptr to the skb we're sending
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002125 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
2126 * @cd_tunneling: ptr to context descriptor bits
2127 *
2128 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2129 **/
2130static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002131 u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2132 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002133{
2134 u32 cd_cmd, cd_tso_len, cd_mss;
Francois Romieudd225bc2014-03-30 03:14:48 +00002135 struct ipv6hdr *ipv6h;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002136 struct tcphdr *tcph;
2137 struct iphdr *iph;
2138 u32 l4len;
2139 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002140
2141 if (!skb_is_gso(skb))
2142 return 0;
2143
Francois Romieudd225bc2014-03-30 03:14:48 +00002144 err = skb_cow_head(skb, 0);
2145 if (err < 0)
2146 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002147
Anjali Singhaidf230752014-12-19 02:58:16 +00002148 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2149 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2150
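	/* zero the IP length/checksum fields and seed the TCP checksum with
	 * the pseudo-header (using a length of 0) so the hardware can
	 * complete it for every segment
	 */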
2151 if (iph->version == 4) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002152 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2153 iph->tot_len = 0;
2154 iph->check = 0;
2155 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2156 0, IPPROTO_TCP, 0);
Anjali Singhaidf230752014-12-19 02:58:16 +00002157 } else if (ipv6h->version == 6) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002158 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2159 ipv6h->payload_len = 0;
2160 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2161 0, IPPROTO_TCP, 0);
2162 }
2163
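	/* hdr_len covers the MAC + IP + TCP headers; for encapsulated frames
	 * it is measured through the inner transport header
	 */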
2164 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2165 *hdr_len = (skb->encapsulation
2166 ? (skb_inner_transport_header(skb) - skb->data)
2167 : skb_transport_offset(skb)) + l4len;
2168
2169 /* find the field values */
2170 cd_cmd = I40E_TX_CTX_DESC_TSO;
2171 cd_tso_len = skb->len - *hdr_len;
2172 cd_mss = skb_shinfo(skb)->gso_size;
Mitch Williams829af3ac2013-12-18 13:46:00 +00002173 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2174 ((u64)cd_tso_len <<
2175 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2176 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002177 return 1;
2178}
2179
2180/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002181 * i40e_tsyn - set up the tsyn context descriptor
2182 * @tx_ring: ptr to the ring to send
2183 * @skb: ptr to the skb we're sending
2184 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
2185 *
2186 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2187 **/
2188static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2189 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2190{
2191 struct i40e_pf *pf;
2192
2193 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2194 return 0;
2195
2196 /* Tx timestamps cannot be sampled when doing TSO */
2197 if (tx_flags & I40E_TX_FLAGS_TSO)
2198 return 0;
2199
2200 /* only timestamp the outbound packet if the user has requested it and
2201 * we are not already transmitting a packet to be timestamped
2202 */
2203 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002204 if (!(pf->flags & I40E_FLAG_PTP))
2205 return 0;
2206
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002207 if (pf->ptp_tx &&
2208 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002209 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2210 pf->ptp_tx_skb = skb_get(skb);
2211 } else {
2212 return 0;
2213 }
2214
2215 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2216 I40E_TXD_CTX_QW1_CMD_SHIFT;
2217
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002218 return 1;
2219}
2220
2221/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002222 * i40e_tx_enable_csum - Enable Tx checksum offloads
2223 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002224 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002225 * @td_cmd: Tx descriptor command bits to set
2226 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
2227 * @cd_tunneling: ptr to context desc bits
2228 **/
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002229static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002230 u32 *td_cmd, u32 *td_offset,
2231 struct i40e_ring *tx_ring,
2232 u32 *cd_tunneling)
2233{
2234 struct ipv6hdr *this_ipv6_hdr;
2235 unsigned int this_tcp_hdrlen;
2236 struct iphdr *this_ip_hdr;
2237 u32 network_hdr_len;
2238 u8 l4_hdr = 0;
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04002239 struct udphdr *oudph;
2240 struct iphdr *oiph;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002241 u32 l4_tunnel = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002242
2243 if (skb->encapsulation) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002244 switch (ip_hdr(skb)->protocol) {
2245 case IPPROTO_UDP:
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04002246 oudph = udp_hdr(skb);
2247 oiph = ip_hdr(skb);
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002248 l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002249 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002250 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002251 case IPPROTO_GRE:
2252 l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
2253 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002254 default:
2255 return;
2256 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002257 network_hdr_len = skb_inner_network_header_len(skb);
2258 this_ip_hdr = inner_ip_hdr(skb);
2259 this_ipv6_hdr = inner_ipv6_hdr(skb);
2260 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2261
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002262 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2263 if (*tx_flags & I40E_TX_FLAGS_TSO) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002264 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
2265 ip_hdr(skb)->check = 0;
2266 } else {
2267 *cd_tunneling |=
2268 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2269 }
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002270 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Anjali Singhaidf230752014-12-19 02:58:16 +00002271 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002272 if (*tx_flags & I40E_TX_FLAGS_TSO)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002273 ip_hdr(skb)->check = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002274 }
2275
2276 /* Now set the ctx descriptor fields */
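		/* EXT_IPLEN is the outer IP header length in 32-bit words and
		 * NATLEN is the tunneling header length in 16-bit words
		 */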
2277 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002278 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
2279 l4_tunnel |
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002280 ((skb_inner_network_offset(skb) -
2281 skb_transport_offset(skb)) >> 1) <<
2282 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
Anjali Singhaidf230752014-12-19 02:58:16 +00002283 if (this_ip_hdr->version == 6) {
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002284 *tx_flags &= ~I40E_TX_FLAGS_IPV4;
2285 *tx_flags |= I40E_TX_FLAGS_IPV6;
Anjali Singhaidf230752014-12-19 02:58:16 +00002286 }
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04002287 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2288 (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
2289 (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
2290 oudph->check = ~csum_tcpudp_magic(oiph->saddr,
2291 oiph->daddr,
2292 (skb->len - skb_transport_offset(skb)),
2293 IPPROTO_UDP, 0);
2294 *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2295 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002296 } else {
2297 network_hdr_len = skb_network_header_len(skb);
2298 this_ip_hdr = ip_hdr(skb);
2299 this_ipv6_hdr = ipv6_hdr(skb);
2300 this_tcp_hdrlen = tcp_hdrlen(skb);
2301 }
2302
2303 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002304 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002305 l4_hdr = this_ip_hdr->protocol;
2306 /* the stack computes the IP header already, the only time we
2307 * need the hardware to recompute it is in the case of TSO.
2308 */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002309 if (*tx_flags & I40E_TX_FLAGS_TSO) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002310 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2311 this_ip_hdr->check = 0;
2312 } else {
2313 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
2314 }
2315 /* Now set the td_offset for IP header length */
2316 *td_offset = (network_hdr_len >> 2) <<
2317 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002318 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002319 l4_hdr = this_ipv6_hdr->nexthdr;
2320 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2321 /* Now set the td_offset for IP header length */
2322 *td_offset = (network_hdr_len >> 2) <<
2323 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2324 }
2325 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2326 *td_offset |= (skb_network_offset(skb) >> 1) <<
2327 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2328
2329 /* Enable L4 checksum offloads */
2330 switch (l4_hdr) {
2331 case IPPROTO_TCP:
2332 /* enable checksum offloads */
2333 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2334 *td_offset |= (this_tcp_hdrlen >> 2) <<
2335 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2336 break;
2337 case IPPROTO_SCTP:
2338 /* enable SCTP checksum offload */
2339 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2340 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
2341 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2342 break;
2343 case IPPROTO_UDP:
2344 /* enable UDP checksum offload */
2345 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2346 *td_offset |= (sizeof(struct udphdr) >> 2) <<
2347 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2348 break;
2349 default:
2350 break;
2351 }
2352}
2353
2354/**
2355 * i40e_create_tx_ctx - Build the Tx context descriptor
2356 * @tx_ring: ring to create the descriptor on
2357 * @cd_type_cmd_tso_mss: Quad Word 1
2358 * @cd_tunneling: Quad Word 0 - bits 0-31
2359 * @cd_l2tag2: Quad Word 0 - bits 32-63
2360 **/
2361static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2362 const u64 cd_type_cmd_tso_mss,
2363 const u32 cd_tunneling, const u32 cd_l2tag2)
2364{
2365 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002366 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002367
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002368 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2369 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002370 return;
2371
2372 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002373 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2374
2375 i++;
2376 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002377
2378 /* cpu_to_le32 and assign to struct fields */
2379 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2380 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002381 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002382 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2383}
2384
2385/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002386 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2387 * @tx_ring: the ring to be checked
2388 * @size: the size buffer we want to assure is available
2389 *
2390 * Returns -EBUSY if a stop is needed, else 0
2391 **/
2392static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2393{
2394 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2395 /* Memory barrier before checking head and tail */
2396 smp_mb();
2397
2398 /* Check again in a case another CPU has just made room available. */
2399 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2400 return -EBUSY;
2401
2402 /* A reprieve! - use start_queue because it doesn't call schedule */
2403 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2404 ++tx_ring->tx_stats.restart_queue;
2405 return 0;
2406}
2407
2408/**
2409 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2410 * @tx_ring: the ring to be checked
2411 * @size: the size buffer we want to assure is available
2412 *
2413 * Returns 0 if stop is not needed
2414 **/
2415#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002416inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002417#else
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002418static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002419#endif
2420{
2421 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2422 return 0;
2423 return __i40e_maybe_stop_tx(tx_ring, size);
2424}
2425
2426/**
Anjali Singhai71da6192015-02-21 06:42:35 +00002427 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2428 * @skb: send buffer
2429 * @tx_flags: collected send information
Anjali Singhai71da6192015-02-21 06:42:35 +00002430 *
2431 * Note: Our HW can't scatter-gather more than 8 fragments to build
2432 * a packet on the wire and so we need to figure out the cases where we
2433 * need to linearize the skb.
2434 **/
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002435static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
Anjali Singhai71da6192015-02-21 06:42:35 +00002436{
2437 struct skb_frag_struct *frag;
2438 bool linearize = false;
2439 unsigned int size = 0;
2440 u16 num_frags;
2441 u16 gso_segs;
2442
2443 num_frags = skb_shinfo(skb)->nr_frags;
2444 gso_segs = skb_shinfo(skb)->gso_segs;
2445
2446 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002447 u16 j = 0;
Anjali Singhai71da6192015-02-21 06:42:35 +00002448
2449 if (num_frags < (I40E_MAX_BUFFER_TXD))
2450 goto linearize_chk_done;
2451 /* try the simple math, if we have too many frags per segment */
2452 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2453 I40E_MAX_BUFFER_TXD) {
2454 linearize = true;
2455 goto linearize_chk_done;
2456 }
2457 frag = &skb_shinfo(skb)->frags[0];
Anjali Singhai71da6192015-02-21 06:42:35 +00002458 /* we might still have more fragments per segment */
2459 do {
2460 size += skb_frag_size(frag);
2461 frag++; j++;
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002462 if ((size >= skb_shinfo(skb)->gso_size) &&
2463 (j < I40E_MAX_BUFFER_TXD)) {
2464 size = (size % skb_shinfo(skb)->gso_size);
2465 j = (size) ? 1 : 0;
2466 }
Anjali Singhai71da6192015-02-21 06:42:35 +00002467 if (j == I40E_MAX_BUFFER_TXD) {
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002468 linearize = true;
2469 break;
Anjali Singhai71da6192015-02-21 06:42:35 +00002470 }
2471 num_frags--;
2472 } while (num_frags);
2473 } else {
2474 if (num_frags >= I40E_MAX_BUFFER_TXD)
2475 linearize = true;
2476 }
2477
2478linearize_chk_done:
2479 return linearize;
2480}
2481
2482/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002483 * i40e_tx_map - Build the Tx descriptor
2484 * @tx_ring: ring to send buffer on
2485 * @skb: send buffer
2486 * @first: first buffer info buffer to use
2487 * @tx_flags: collected send information
2488 * @hdr_len: size of the packet header
2489 * @td_cmd: the command field in the descriptor
2490 * @td_offset: offset for checksum or crc
2491 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002492#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002493inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002494 struct i40e_tx_buffer *first, u32 tx_flags,
2495 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002496#else
2497static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2498 struct i40e_tx_buffer *first, u32 tx_flags,
2499 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002500#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002501{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002502 unsigned int data_len = skb->data_len;
2503 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002504 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002505 struct i40e_tx_buffer *tx_bi;
2506 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002507 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002508 u32 td_tag = 0;
2509 dma_addr_t dma;
2510 u16 gso_segs;
Anjali Singhai58044742015-09-25 18:26:13 -07002511 u16 desc_count = 0;
2512 bool tail_bump = true;
2513 bool do_rs = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002514
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002515 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2516 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2517 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2518 I40E_TX_FLAGS_VLAN_SHIFT;
2519 }
2520
Alexander Duycka5e9c572013-09-28 06:00:27 +00002521 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2522 gso_segs = skb_shinfo(skb)->gso_segs;
2523 else
2524 gso_segs = 1;
2525
2526 /* multiply data chunks by size of headers */
2527 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2528 first->gso_segs = gso_segs;
2529 first->skb = skb;
2530 first->tx_flags = tx_flags;
2531
2532 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2533
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002534 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002535 tx_bi = first;
2536
2537 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2538 if (dma_mapping_error(tx_ring->dev, dma))
2539 goto dma_error;
2540
2541 /* record length, and DMA address */
2542 dma_unmap_len_set(tx_bi, len, size);
2543 dma_unmap_addr_set(tx_bi, dma, dma);
2544
2545 tx_desc->buffer_addr = cpu_to_le64(dma);
2546
2547 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002548 tx_desc->cmd_type_offset_bsz =
2549 build_ctob(td_cmd, td_offset,
2550 I40E_MAX_DATA_PER_TXD, td_tag);
2551
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002552 tx_desc++;
2553 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002554 desc_count++;
2555
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002556 if (i == tx_ring->count) {
2557 tx_desc = I40E_TX_DESC(tx_ring, 0);
2558 i = 0;
2559 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002560
2561 dma += I40E_MAX_DATA_PER_TXD;
2562 size -= I40E_MAX_DATA_PER_TXD;
2563
2564 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002565 }
2566
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002567 if (likely(!data_len))
2568 break;
2569
Alexander Duycka5e9c572013-09-28 06:00:27 +00002570 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2571 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002572
2573 tx_desc++;
2574 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002575 desc_count++;
2576
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002577 if (i == tx_ring->count) {
2578 tx_desc = I40E_TX_DESC(tx_ring, 0);
2579 i = 0;
2580 }
2581
Alexander Duycka5e9c572013-09-28 06:00:27 +00002582 size = skb_frag_size(frag);
2583 data_len -= size;
2584
2585 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2586 DMA_TO_DEVICE);
2587
2588 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002589 }
2590
Alexander Duycka5e9c572013-09-28 06:00:27 +00002591 /* set next_to_watch value indicating a packet is present */
2592 first->next_to_watch = tx_desc;
2593
2594 i++;
2595 if (i == tx_ring->count)
2596 i = 0;
2597
2598 tx_ring->next_to_use = i;
2599
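	/* report the queued bytes to BQL and stop the queue now if fewer than
	 * DESC_NEEDED descriptors remain for the next frame
	 */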
Anjali Singhai58044742015-09-25 18:26:13 -07002600 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2601 tx_ring->queue_index),
2602 first->bytecount);
Eric Dumazet4567dc12014-10-07 13:30:23 -07002603 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07002604
2605 /* Algorithm to optimize tail and RS bit setting:
2606 * if xmit_more is supported
2607 * if xmit_more is true
2608 * do not update tail and do not mark RS bit.
2609 * if xmit_more is false and last xmit_more was false
2610 * if every packet spanned less than 4 desc
2611 * then set RS bit on 4th packet and update tail
2612 * on every packet
2613 * else
2614 * update tail and set RS bit on every packet.
2615 * if xmit_more is false and last_xmit_more was true
2616 * update tail and set RS bit.
2617 *
2618 * Optimization: wmb to be issued only in case of tail update.
2619 * Also optimize the Descriptor WB path for RS bit with the same
2620 * algorithm.
2621 *
2622 * Note: If there are less than 4 packets
2623 * pending and interrupts were disabled the service task will
2624 * trigger a force WB.
2625 */
2626 if (skb->xmit_more &&
2627 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2628 tx_ring->queue_index))) {
2629 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2630 tail_bump = false;
2631 } else if (!skb->xmit_more &&
2632 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2633 tx_ring->queue_index)) &&
2634 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2635 (tx_ring->packet_stride < WB_STRIDE) &&
2636 (desc_count < WB_STRIDE)) {
2637 tx_ring->packet_stride++;
2638 } else {
2639 tx_ring->packet_stride = 0;
2640 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2641 do_rs = true;
2642 }
2643 if (do_rs)
2644 tx_ring->packet_stride = 0;
2645
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
				    I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

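/* Error unwind for a failed DMA mapping: walk the ring backwards from the
 * buffer that failed to the first buffer of the frame, releasing every
 * mapping taken so far, then restore next_to_use.
 */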
dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns the number of data descriptors needed for this skb, or 0 when
 * there are not enough descriptors available on this ring, since at least
 * one descriptor is required.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 * + 4 desc gap to avoid the cache line where head is,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
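	/* TXD_USE_COUNT() converts a buffer length into the number of data
	 * descriptors needed at I40E_MAX_DATA_PER_TXD bytes per descriptor.
	 */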
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

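	/* i40e_tso() returns a negative value on error, 0 when the frame
	 * does not need TSO, and a positive value once the TSO context
	 * fields (MSS and header length) have been prepared.
	 */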
	tso = i40e_tso(tx_ring, skb, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

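	/* Request a hardware Tx timestamp when the skb asks for one (PTP);
	 * i40e_tsyn() returns nonzero if a timestamp was set up.
	 */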
	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

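	/* The hardware limits how many data descriptors a single packet may
	 * consume; if this skb's fragment layout would exceed that limit,
	 * fall back to linearizing it and count the event in the stats.
	 */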
	if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
		tx_ring->tx_stats.tx_linearize++;
	}
	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

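	/* Program the context descriptor (TSO, timestamp and tunneling
	 * metadata) ahead of the data descriptors; it is only written to the
	 * ring when one of those fields is actually in use.
	 */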
	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
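	/* skb->queue_mapping was chosen by the stack's queue selection
	 * before this hook ran; each Tx queue maps to exactly one ring.
	 */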

	/* hardware can't handle really short frames, so pad them out here;
	 * hardware padding works beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}