// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"

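/**
 * build_ctob - build the cmd/type/offset/size quadword of a Tx data descriptor
 * @td_cmd: descriptor command bits (EOP, RS, offload flags, ...)
 * @td_offset: packed MACLEN/IPLEN/L4LEN header offsets
 * @size: size of the attached data buffer in bytes
 * @td_tag: L2 tag 1 (e.g. a VLAN tag) to insert
 *
 * Descriptive summary based on the shift macros below: each field is moved
 * into its slot in descriptor quadword 1 and the result is returned in
 * little-endian order, ready to be written to the descriptor ring.
 **/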
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);

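/* max number of 1 ms waits for two free descriptors on the FDIR Tx ring,
 * see the wait loop in i40e_program_fdir_filter() below
 */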
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
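	/* Dummy UDP/IPv4 frame: a 14-byte Ethernet header (EtherType 0x0800),
	 * a 20-byte IPv4 header (0x45 = version 4/IHL 5, 0x11 = IPPROTO_UDP)
	 * and an 8-byte UDP header: 14 + 20 + 8 = 42 bytes. Addresses and
	 * ports are patched in below before the filter is programmed.
	 */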
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}

#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worst case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			page_frag_free(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

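	/* Illustrative example: with count = 512, head = 500 and tail = 10,
	 * the pending region wraps past the end of the ring, and the result
	 * below is tail + count - head = 10 + 512 - 500 = 22 descriptors.
	 */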
734 if (head != tail)
735 return (head < tail) ?
736 tail - head : (tail + ring->count - head);
737
738 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000739}
740
/**
 * i40e_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues and, if a queue appears hung, triggers recovery by
 * issuing a SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

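/* arm a descriptor write-back once fewer than WB_STRIDE descriptors remain
 * pending, see the I40E_TXR_FLAGS_WB_ON_ITR handling in i40e_clean_tx_irq()
 */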
#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

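	/* The Tx ring runs in head write-back mode: the hardware DMAs the
	 * index of the last cleaned descriptor to a location just past the
	 * end of the descriptor ring, and i40e_get_head() reads it from
	 * there rather than from a device register.
	 */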
	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

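/* only restart a stopped queue once at least twice the descriptors needed
 * for a worst-case (maximally fragmented) frame are free again
 */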
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
		/* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
		/* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

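/* true if @rc is the Rx ring container of @q_vector, false for the Tx one */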
static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

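/* Scale ITR increments by link speed: faster links get a larger divisor, so
 * the same average wire size produces a smaller ITR value (a higher
 * interrupt rate) in i40e_update_itr() below.
 */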
static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}

/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * give the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}
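
	/* Worked example, assuming I40E_ITR_ADAPTIVE_MIN_INC is 2: on a
	 * 40Gb link i40e_itr_divisor() returns 2048, so the 32256 plateau
	 * above becomes DIV_ROUND_UP(32256, 2048) * 2 = 32 usecs below,
	 * i.e. roughly the 30K ints/sec limit quoted in the comment.
	 */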

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/**
 * i40e_rx_is_programming_status - check for programming status descriptor
 * @qw: qword representing status_error_len in CPU ordering
 *
1277 * The value in the descriptor length field indicates whether this
1278 * is a programming status descriptor for flow director or FCoE,
1279 * signalled by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
1280 * it is a packet descriptor.
1281 **/
1282static inline bool i40e_rx_is_programming_status(u64 qw)
1283{
1284 /* The Rx filter programming status and SPH bit occupy the same
1285 * spot in the descriptor. Since we don't support packet split we
1286 * can just reuse the bit as an indication that this is a
1287 * programming status descriptor.
1288 */
1289 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1290}
1291
1292/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001293 * i40e_clean_programming_status - clean the programming status descriptor
1294 * @rx_ring: the rx ring that has this descriptor
1295 * @rx_desc: the rx descriptor written back by HW
Alexander Duyck0e626ff2017-04-10 05:18:43 -04001296 * @qw: qword representing status_error_len in CPU ordering
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001297 *
1298 * Flow director should handle FD_FILTER_STATUS to check whether its filter
1299 * programming succeeded or failed and take actions accordingly. FCoE should
1300 * handle its context/filter programming/invalidation status and take actions.
1301 *
1302 **/
1303static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
Alexander Duyck0e626ff2017-04-10 05:18:43 -04001304 union i40e_rx_desc *rx_desc,
1305 u64 qw)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001306{
Alexander Duyck2b9478f2017-10-04 08:44:43 -07001307 struct i40e_rx_buffer *rx_buffer;
1308 u32 ntc = rx_ring->next_to_clean;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001309 u8 id;
1310
Alexander Duyck0e626ff2017-04-10 05:18:43 -04001311 /* fetch, update, and store next to clean */
Alexander Duyck2b9478f2017-10-04 08:44:43 -07001312 rx_buffer = &rx_ring->rx_bi[ntc++];
Alexander Duyck0e626ff2017-04-10 05:18:43 -04001313 ntc = (ntc < rx_ring->count) ? ntc : 0;
1314 rx_ring->next_to_clean = ntc;
1315
1316 prefetch(I40E_RX_DESC(rx_ring, ntc));
1317
Alexander Duyck2b9478f2017-10-04 08:44:43 -07001318 /* place unused page back on the ring */
1319 i40e_reuse_rx_page(rx_ring, rx_buffer);
1320 rx_ring->rx_stats.page_reuse_count++;
1321
1322 /* clear contents of buffer_info */
1323 rx_buffer->page = NULL;
1324
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001325 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1326 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1327
1328 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00001329 i40e_fd_handle_status(rx_ring, rx_desc, id);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001330}
1331
1332/**
1333 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1334 * @tx_ring: the tx ring to set up
1335 *
1336 * Return 0 on success, negative on error
1337 **/
1338int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1339{
1340 struct device *dev = tx_ring->dev;
1341 int bi_size;
1342
1343 if (!dev)
1344 return -ENOMEM;
1345
Jesse Brandeburge908f812015-07-23 16:54:42 -04001346 /* warn if we are about to overwrite the pointer */
1347 WARN_ON(tx_ring->tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001348 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1349 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1350 if (!tx_ring->tx_bi)
1351 goto err;
1352
Florian Fainelli7d6d0672017-08-01 12:11:07 -07001353 u64_stats_init(&tx_ring->syncp);
1354
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001355 /* round up to nearest 4K */
1356 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +00001357 /* add u32 for head writeback, align after this takes care of
1358 * guaranteeing this is at least one cache line in size
1359 */
1360 tx_ring->size += sizeof(u32);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001361 tx_ring->size = ALIGN(tx_ring->size, 4096);
1362 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1363 &tx_ring->dma, GFP_KERNEL);
1364 if (!tx_ring->desc) {
1365 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1366 tx_ring->size);
1367 goto err;
1368 }
1369
1370 tx_ring->next_to_use = 0;
1371 tx_ring->next_to_clean = 0;
Sudheer Mogilappagari07d44192017-12-18 05:17:25 -05001372 tx_ring->tx_stats.prev_pkt_ctr = -1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001373 return 0;
1374
1375err:
1376 kfree(tx_ring->tx_bi);
1377 tx_ring->tx_bi = NULL;
1378 return -ENOMEM;
1379}
1380
1381/**
1382 * i40e_clean_rx_ring - Free Rx buffers
1383 * @rx_ring: ring to be cleaned
1384 **/
1385void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1386{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001387 unsigned long bi_size;
1388 u16 i;
1389
1390 /* ring already cleared, nothing to do */
1391 if (!rx_ring->rx_bi)
1392 return;
1393
Scott Petersone72e5652017-02-09 23:40:25 -08001394 if (rx_ring->skb) {
1395 dev_kfree_skb(rx_ring->skb);
1396 rx_ring->skb = NULL;
1397 }
1398
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001399 /* Free all the Rx ring sk_buffs */
1400 for (i = 0; i < rx_ring->count; i++) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001401 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1402
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001403 if (!rx_bi->page)
1404 continue;
1405
Alexander Duyck59605bc2017-01-30 12:29:35 -08001406 /* Invalidate cache lines that may have been written to by
1407 * device so that we avoid corrupting memory.
1408 */
1409 dma_sync_single_range_for_cpu(rx_ring->dev,
1410 rx_bi->dma,
1411 rx_bi->page_offset,
Alexander Duyck98efd692017-04-05 07:51:01 -04001412 rx_ring->rx_buf_len,
Alexander Duyck59605bc2017-01-30 12:29:35 -08001413 DMA_FROM_DEVICE);
1414
1415 /* free resources associated with mapping */
1416 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
Alexander Duyck98efd692017-04-05 07:51:01 -04001417 i40e_rx_pg_size(rx_ring),
Alexander Duyck59605bc2017-01-30 12:29:35 -08001418 DMA_FROM_DEVICE,
1419 I40E_RX_DMA_ATTR);
Alexander Duyck98efd692017-04-05 07:51:01 -04001420
Alexander Duyck17936682017-02-21 15:55:39 -08001421 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001422
1423 rx_bi->page = NULL;
1424 rx_bi->page_offset = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001425 }
1426
1427 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1428 memset(rx_ring->rx_bi, 0, bi_size);
1429
1430 /* Zero out the descriptor ring */
1431 memset(rx_ring->desc, 0, rx_ring->size);
1432
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001433 rx_ring->next_to_alloc = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001434 rx_ring->next_to_clean = 0;
1435 rx_ring->next_to_use = 0;
1436}
1437
1438/**
1439 * i40e_free_rx_resources - Free Rx resources
1440 * @rx_ring: ring to clean the resources from
1441 *
1442 * Free all receive software resources
1443 **/
1444void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1445{
1446 i40e_clean_rx_ring(rx_ring);
Jesper Dangaard Brouer87128822018-01-03 11:25:23 +01001447 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1448 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
Björn Töpel0c8493d2017-05-24 07:55:34 +02001449 rx_ring->xdp_prog = NULL;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001450 kfree(rx_ring->rx_bi);
1451 rx_ring->rx_bi = NULL;
1452
1453 if (rx_ring->desc) {
1454 dma_free_coherent(rx_ring->dev, rx_ring->size,
1455 rx_ring->desc, rx_ring->dma);
1456 rx_ring->desc = NULL;
1457 }
1458}
1459
1460/**
1461 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1462 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1463 *
1464 * Returns 0 on success, negative on failure
1465 **/
1466int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1467{
1468 struct device *dev = rx_ring->dev;
Jesper Dangaard Brouer87128822018-01-03 11:25:23 +01001469 int err = -ENOMEM;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001470 int bi_size;
1471
Jesse Brandeburge908f812015-07-23 16:54:42 -04001472 /* warn if we are about to overwrite the pointer */
1473 WARN_ON(rx_ring->rx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001474 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1475 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1476 if (!rx_ring->rx_bi)
1477 goto err;
1478
Carolyn Wybornyf217d6c2015-02-09 17:42:31 -08001479 u64_stats_init(&rx_ring->syncp);
Carolyn Wyborny638702b2015-01-24 09:58:32 +00001480
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001481 /* Round up to nearest 4K */
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001482 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001483 rx_ring->size = ALIGN(rx_ring->size, 4096);
1484 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1485 &rx_ring->dma, GFP_KERNEL);
1486
1487 if (!rx_ring->desc) {
1488 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1489 rx_ring->size);
1490 goto err;
1491 }
1492
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001493 rx_ring->next_to_alloc = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001494 rx_ring->next_to_clean = 0;
1495 rx_ring->next_to_use = 0;
1496
Jesper Dangaard Brouer87128822018-01-03 11:25:23 +01001497 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1498 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1499 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1500 rx_ring->queue_index);
1501 if (err < 0)
1502 goto err;
1503 }
1504
Björn Töpel0c8493d2017-05-24 07:55:34 +02001505 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1506
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001507 return 0;
1508err:
1509 kfree(rx_ring->rx_bi);
1510 rx_ring->rx_bi = NULL;
Jesper Dangaard Brouer87128822018-01-03 11:25:23 +01001511 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001512}
1513
1514/**
1515 * i40e_release_rx_desc - Store the new tail and head values
1516 * @rx_ring: ring to bump
1517 * @val: new head index
1518 **/
1519static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1520{
1521 rx_ring->next_to_use = val;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001522
1523 /* update next to alloc since we have filled the ring */
1524 rx_ring->next_to_alloc = val;
1525
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001526 /* Force memory writes to complete before letting h/w
1527 * know there are new descriptors to fetch. (Only
1528 * applicable for weak-ordered memory model archs,
1529 * such as IA-64).
1530 */
1531 wmb();
1532 writel(val, rx_ring->tail);
1533}
1534
1535/**
Alexander Duyckca9ec082017-04-05 07:51:02 -04001536 * i40e_rx_offset - Return expected offset into page to access data
1537 * @rx_ring: Ring we are requesting offset of
1538 *
1539 * Returns the offset value for ring into the data buffer.
1540 */
1541static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1542{
1543 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1544}
1545
1546/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001547 * i40e_alloc_mapped_page - recycle or make a new page
1548 * @rx_ring: ring to use
1549 * @bi: rx_buffer struct to modify
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001550 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001551 * Returns true if the page was successfully allocated or
1552 * reused.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001553 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001554static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1555 struct i40e_rx_buffer *bi)
Mitch Williamsa132af22015-01-24 09:58:35 +00001556{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001557 struct page *page = bi->page;
1558 dma_addr_t dma;
Mitch Williamsa132af22015-01-24 09:58:35 +00001559
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001560 /* since we are recycling buffers we should seldom need to alloc */
1561 if (likely(page)) {
1562 rx_ring->rx_stats.page_reuse_count++;
1563 return true;
Mitch Williamsa132af22015-01-24 09:58:35 +00001564 }
1565
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001566 /* alloc new page for storage */
Alexander Duyck98efd692017-04-05 07:51:01 -04001567 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001568 if (unlikely(!page)) {
1569 rx_ring->rx_stats.alloc_page_failed++;
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001570 return false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001571 }
1572
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001573 /* map page for use */
Alexander Duyck59605bc2017-01-30 12:29:35 -08001574 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
Alexander Duyck98efd692017-04-05 07:51:01 -04001575 i40e_rx_pg_size(rx_ring),
Alexander Duyck59605bc2017-01-30 12:29:35 -08001576 DMA_FROM_DEVICE,
1577 I40E_RX_DMA_ATTR);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001578
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001579 /* if mapping failed free memory back to system since
1580 * there isn't much point in holding memory we can't use
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001581 */
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001582 if (dma_mapping_error(rx_ring->dev, dma)) {
Alexander Duyck98efd692017-04-05 07:51:01 -04001583 __free_pages(page, i40e_rx_pg_order(rx_ring));
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001584 rx_ring->rx_stats.alloc_page_failed++;
1585 return false;
1586 }
1587
1588 bi->dma = dma;
1589 bi->page = page;
Alexander Duyckca9ec082017-04-05 07:51:02 -04001590 bi->page_offset = i40e_rx_offset(rx_ring);
Alexander Duycka0cfc312017-03-14 10:15:24 -07001591
1592 /* initialize pagecnt_bias to 1 representing we fully own page */
Alexander Duyck17936682017-02-21 15:55:39 -08001593 bi->pagecnt_bias = 1;
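	/* pagecnt_bias tracks how many of the page references are still held
	 * by the driver: i40e_get_rx_buffer() decrements it every time the
	 * buffer is pulled for CPU use, i40e_can_reuse_rx_page() compares it
	 * against page_count() to tell whether the stack still owns the other
	 * half of the page, and __page_frag_cache_drain() returns the unused
	 * references when the page is finally freed.
	 */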
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001594
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001595 return true;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001596}
1597
1598/**
1599 * i40e_receive_skb - Send a completed packet up the stack
1600 * @rx_ring: rx ring in play
1601 * @skb: packet to send up
1602 * @vlan_tag: vlan tag for packet
1603 **/
1604static void i40e_receive_skb(struct i40e_ring *rx_ring,
1605 struct sk_buff *skb, u16 vlan_tag)
1606{
1607 struct i40e_q_vector *q_vector = rx_ring->q_vector;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001608
Jesse Brandeburga149f2c2016-04-12 08:30:49 -07001609 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1610 (vlan_tag & VLAN_VID_MASK))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001611 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1612
Alexander Duyck8b650352015-09-24 09:04:32 -07001613 napi_gro_receive(&q_vector->napi, skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001614}
1615
1616/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001617 * i40e_alloc_rx_buffers - Replace used receive buffers
1618 * @rx_ring: ring to place buffers on
1619 * @cleaned_count: number of buffers to replace
1620 *
1621 * Returns false if all allocations were successful, true if any fail
1622 **/
1623bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1624{
1625 u16 ntu = rx_ring->next_to_use;
1626 union i40e_rx_desc *rx_desc;
1627 struct i40e_rx_buffer *bi;
1628
1629 /* do nothing if no valid netdev defined */
1630 if (!rx_ring->netdev || !cleaned_count)
1631 return false;
1632
1633 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1634 bi = &rx_ring->rx_bi[ntu];
1635
1636 do {
1637 if (!i40e_alloc_mapped_page(rx_ring, bi))
1638 goto no_buffers;
1639
Alexander Duyck59605bc2017-01-30 12:29:35 -08001640 /* sync the buffer for use by the device */
1641 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1642 bi->page_offset,
Alexander Duyck98efd692017-04-05 07:51:01 -04001643 rx_ring->rx_buf_len,
Alexander Duyck59605bc2017-01-30 12:29:35 -08001644 DMA_FROM_DEVICE);
1645
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001646 /* Refresh the desc even if buffer_addrs didn't change
1647 * because each write-back erases this info.
1648 */
1649 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001650
1651 rx_desc++;
1652 bi++;
1653 ntu++;
1654 if (unlikely(ntu == rx_ring->count)) {
1655 rx_desc = I40E_RX_DESC(rx_ring, 0);
1656 bi = rx_ring->rx_bi;
1657 ntu = 0;
1658 }
1659
1660 /* clear the status bits for the next_to_use descriptor */
1661 rx_desc->wb.qword1.status_error_len = 0;
1662
1663 cleaned_count--;
1664 } while (cleaned_count);
1665
1666 if (rx_ring->next_to_use != ntu)
1667 i40e_release_rx_desc(rx_ring, ntu);
1668
1669 return false;
1670
1671no_buffers:
1672 if (rx_ring->next_to_use != ntu)
1673 i40e_release_rx_desc(rx_ring, ntu);
1674
1675 /* make sure to come back via polling to try again after
1676 * allocation failure
1677 */
1678 return true;
1679}
1680
1681/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001682 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1683 * @vsi: the VSI we care about
1684 * @skb: skb currently being received and modified
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001685 * @rx_desc: the receive descriptor
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001686 **/
1687static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1688 struct sk_buff *skb,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001689 union i40e_rx_desc *rx_desc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001690{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001691 struct i40e_rx_ptype_decoded decoded;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001692 u32 rx_error, rx_status;
Alexander Duyck858296c82016-06-14 15:45:42 -07001693 bool ipv4, ipv6;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001694 u8 ptype;
1695 u64 qword;
1696
1697 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1698 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1699 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1700 I40E_RXD_QW1_ERROR_SHIFT;
1701 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1702 I40E_RXD_QW1_STATUS_SHIFT;
1703 decoded = decode_rx_desc_ptype(ptype);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001704
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001705 skb->ip_summed = CHECKSUM_NONE;
1706
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001707 skb_checksum_none_assert(skb);
1708
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001709 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001710 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001711 return;
1712
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001713 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001714 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001715 return;
1716
1717 /* both known and outer_ip must be set for the below code to work */
1718 if (!(decoded.known && decoded.outer_ip))
1719 return;
1720
Alexander Duyckfad57332016-01-24 21:17:22 -08001721 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1722 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1723 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1724 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001725
1726 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001727 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1728 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001729 goto checksum_fail;
1730
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001731 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001732 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001733 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001734 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001735 return;
1736
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001737 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001738 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001739 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001740
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001741 /* handle packets that were not able to be checksummed due
1742 * to arrival speed; in this case the stack can compute
1743 * the csum.
1744 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001745 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001746 return;
1747
Alexander Duyck858296c82016-06-14 15:45:42 -07001748 /* If there is an outer header present that might contain a checksum
1749 * we need to bump the checksum level by 1 to reflect the fact that
1750 * we are indicating we validated the inner checksum.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001751 */
Alexander Duyck858296c82016-06-14 15:45:42 -07001752 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1753 skb->csum_level = 1;
Alexander Duyckfad57332016-01-24 21:17:22 -08001754
Alexander Duyck858296c82016-06-14 15:45:42 -07001755 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1756 switch (decoded.inner_prot) {
1757 case I40E_RX_PTYPE_INNER_PROT_TCP:
1758 case I40E_RX_PTYPE_INNER_PROT_UDP:
1759 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1760 skb->ip_summed = CHECKSUM_UNNECESSARY;
1761 /* fall through */
1762 default:
1763 break;
1764 }
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001765
1766 return;
1767
1768checksum_fail:
1769 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001770}
1771
1772/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001773 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001774 * @ptype: the ptype value from the descriptor
1775 *
1776 * Returns a hash type to be used by skb_set_hash
1777 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001778static inline int i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001779{
1780 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1781
1782 if (!decoded.known)
1783 return PKT_HASH_TYPE_NONE;
1784
1785 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1786 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1787 return PKT_HASH_TYPE_L4;
1788 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1789 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1790 return PKT_HASH_TYPE_L3;
1791 else
1792 return PKT_HASH_TYPE_L2;
1793}
1794
1795/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001796 * i40e_rx_hash - set the hash value in the skb
1797 * @ring: descriptor ring
1798 * @rx_desc: specific descriptor
 * @skb: skb being populated with the hash
 * @rx_ptype: Rx packet type decoded from the descriptor
1799 **/
1800static inline void i40e_rx_hash(struct i40e_ring *ring,
1801 union i40e_rx_desc *rx_desc,
1802 struct sk_buff *skb,
1803 u8 rx_ptype)
1804{
1805 u32 hash;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001806 const __le64 rss_mask =
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001807 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1808 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1809
Mitch Williamsa876c3b2016-05-03 15:13:18 -07001810 if (!(ring->netdev->features & NETIF_F_RXHASH))
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001811 return;
1812
1813 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1814 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1815 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1816 }
1817}
1818
1819/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001820 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1821 * @rx_ring: rx descriptor ring packet is being transacted on
1822 * @rx_desc: pointer to the EOP Rx descriptor
1823 * @skb: pointer to current skb being populated
1824 * @rx_ptype: the packet type decoded by hardware
Mitch Williamsa132af22015-01-24 09:58:35 +00001825 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001826 * This function checks the ring, descriptor, and packet information in
1827 * order to populate the hash, checksum, VLAN, protocol, and
1828 * other fields within the skb.
Mitch Williamsa132af22015-01-24 09:58:35 +00001829 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001830static inline
1831void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1832 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1833 u8 rx_ptype)
1834{
1835 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1836 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1837 I40E_RXD_QW1_STATUS_SHIFT;
Jacob Keller144ed172016-10-05 09:30:42 -07001838 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1839 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001840 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1841
Jacob Keller12490502016-10-05 09:30:44 -07001842 if (unlikely(tsynvalid))
Jacob Keller144ed172016-10-05 09:30:42 -07001843 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001844
1845 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1846
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001847 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1848
1849 skb_record_rx_queue(skb, rx_ring->queue_index);
Alexander Duycka5b268e2017-02-21 15:55:46 -08001850
1851 /* modifies the skb - consumes the enet header */
1852 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001853}
1854
1855/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001856 * i40e_cleanup_headers - Correct empty headers
1857 * @rx_ring: rx descriptor ring packet is being transacted on
1858 * @skb: pointer to current skb being fixed
Björn Töpel0c8493d2017-05-24 07:55:34 +02001859 * @rx_desc: pointer to the EOP Rx descriptor
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001860 *
1861 * Also address the case where we are pulling data in on pages only
1862 * and as such no data is present in the skb header.
1863 *
1864 * In addition if skb is not at least 60 bytes we need to pad it so that
1865 * it is large enough to qualify as a valid Ethernet frame.
1866 *
1867 * Returns true if an error was encountered and skb was freed.
1868 **/
Björn Töpel0c8493d2017-05-24 07:55:34 +02001869static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1870 union i40e_rx_desc *rx_desc)
1871
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001872{
Björn Töpel0c8493d2017-05-24 07:55:34 +02001873 /* XDP packets use error pointer so abort at this point */
1874 if (IS_ERR(skb))
1875 return true;
1876
1877 /* ERR_MASK will only have valid bits if EOP set, and
1878 * what we are doing here is actually checking
1879 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1880 * the error field
1881 */
1882 if (unlikely(i40e_test_staterr(rx_desc,
1883 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1884 dev_kfree_skb_any(skb);
1885 return true;
1886 }
1887
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001888 /* if eth_skb_pad returns an error the skb was freed */
1889 if (eth_skb_pad(skb))
1890 return true;
1891
1892 return false;
1893}
1894
1895/**
Scott Peterson9b37c932017-02-09 23:43:30 -08001896 * i40e_page_is_reusable - check if any reuse is possible
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001897 * @page: page struct to check
Scott Peterson9b37c932017-02-09 23:43:30 -08001898 *
1899 * A page is not reusable if it was allocated under low memory
1900 * conditions, or it's not in the same NUMA node as this CPU.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001901 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001902static inline bool i40e_page_is_reusable(struct page *page)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001903{
Scott Peterson9b37c932017-02-09 23:43:30 -08001904 return (page_to_nid(page) == numa_mem_id()) &&
1905 !page_is_pfmemalloc(page);
1906}
1907
1908/**
1909 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1910 * the adapter for another receive
1911 *
1912 * @rx_buffer: buffer containing the page
Scott Peterson9b37c932017-02-09 23:43:30 -08001913 *
1914 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1915 * an unused region in the page.
1916 *
1917 * For small pages, @truesize will be a constant value, half the size
1918 * of the memory at page. We'll attempt to alternate between high and
1919 * low halves of the page, with one half ready for use by the hardware
1920 * and the other half being consumed by the stack. We use the page
1921 * ref count to determine whether the stack has finished consuming the
1922 * portion of this page that was passed up with a previous packet. If
1923 * the page ref count is >1, we'll assume the "other" half page is
1924 * still busy, and this page cannot be reused.
1925 *
1926 * For larger pages, @truesize will be the actual space used by the
1927 * received packet (adjusted upward to an even multiple of the cache
1928 * line size). This will advance through the page by the amount
1929 * actually consumed by the received packets while there is still
1930 * space for a buffer. Each region of larger pages will be used at
1931 * most once, after which the page will not be reused.
1932 *
1933 * In either case, if the page is reusable its refcount is increased.
1934 **/
Alexander Duycka0cfc312017-03-14 10:15:24 -07001935static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
Scott Peterson9b37c932017-02-09 23:43:30 -08001936{
Alexander Duycka0cfc312017-03-14 10:15:24 -07001937 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1938 struct page *page = rx_buffer->page;
Scott Peterson9b37c932017-02-09 23:43:30 -08001939
1940 /* Is any reuse possible? */
1941 if (unlikely(!i40e_page_is_reusable(page)))
1942 return false;
1943
1944#if (PAGE_SIZE < 8192)
1945 /* if we are only owner of page we can reuse it */
Alexander Duycka0cfc312017-03-14 10:15:24 -07001946 if (unlikely((page_count(page) - pagecnt_bias) > 1))
Scott Peterson9b37c932017-02-09 23:43:30 -08001947 return false;
Scott Peterson9b37c932017-02-09 23:43:30 -08001948#else
Alexander Duyck98efd692017-04-05 07:51:01 -04001949#define I40E_LAST_OFFSET \
1950 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1951 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
Scott Peterson9b37c932017-02-09 23:43:30 -08001952 return false;
1953#endif
1954
Alexander Duyck17936682017-02-21 15:55:39 -08001955 /* If we have drained the page fragment pool we need to update
1956 * the pagecnt_bias and page count so that we fully restock the
1957 * number of references the driver holds.
1958 */
Alexander Duycka0cfc312017-03-14 10:15:24 -07001959 if (unlikely(!pagecnt_bias)) {
Alexander Duyck17936682017-02-21 15:55:39 -08001960 page_ref_add(page, USHRT_MAX);
1961 rx_buffer->pagecnt_bias = USHRT_MAX;
1962 }
Alexander Duycka0cfc312017-03-14 10:15:24 -07001963
Scott Peterson9b37c932017-02-09 23:43:30 -08001964 return true;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001965}
1966
1967/**
1968 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1969 * @rx_ring: rx descriptor ring to transact packets on
1970 * @rx_buffer: buffer containing page to add
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001971 * @skb: sk_buff to place the data into
Alexander Duycka0cfc312017-03-14 10:15:24 -07001972 * @size: packet length from rx_desc
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001973 *
1974 * This function will add the data contained in rx_buffer->page to the skb.
Alexander Duyckfa2343e2017-03-14 10:15:25 -07001975 * It will just attach the page as a frag to the skb.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001976 *
Alexander Duyckfa2343e2017-03-14 10:15:25 -07001977 * The function will then update the page offset.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001978 **/
Alexander Duycka0cfc312017-03-14 10:15:24 -07001979static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001980 struct i40e_rx_buffer *rx_buffer,
Alexander Duycka0cfc312017-03-14 10:15:24 -07001981 struct sk_buff *skb,
1982 unsigned int size)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001983{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001984#if (PAGE_SIZE < 8192)
Alexander Duyck98efd692017-04-05 07:51:01 -04001985 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001986#else
Alexander Duyckca9ec082017-04-05 07:51:02 -04001987 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001988#endif
Scott Peterson9b37c932017-02-09 23:43:30 -08001989
Alexander Duyckfa2343e2017-03-14 10:15:25 -07001990 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1991 rx_buffer->page_offset, size, truesize);
Scott Peterson9b37c932017-02-09 23:43:30 -08001992
Alexander Duycka0cfc312017-03-14 10:15:24 -07001993 /* page is being used so we must update the page offset */
1994#if (PAGE_SIZE < 8192)
1995 rx_buffer->page_offset ^= truesize;
1996#else
1997 rx_buffer->page_offset += truesize;
1998#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001999}
2000
2001/**
Alexander Duyck9a064122017-03-14 10:15:23 -07002002 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2003 * @rx_ring: rx descriptor ring to transact packets on
2004 * @size: size of buffer to add to skb
2005 *
2006 * This function will pull an Rx buffer from the ring and synchronize it
2007 * for use by the CPU.
2008 */
2009static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2010 const unsigned int size)
2011{
2012 struct i40e_rx_buffer *rx_buffer;
2013
2014 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
2015 prefetchw(rx_buffer->page);
2016
2017 /* we are reusing so sync this buffer for CPU use */
2018 dma_sync_single_range_for_cpu(rx_ring->dev,
2019 rx_buffer->dma,
2020 rx_buffer->page_offset,
2021 size,
2022 DMA_FROM_DEVICE);
2023
Alexander Duycka0cfc312017-03-14 10:15:24 -07002024 /* We have pulled a buffer for use, so decrement pagecnt_bias */
2025 rx_buffer->pagecnt_bias--;
2026
Alexander Duyck9a064122017-03-14 10:15:23 -07002027 return rx_buffer;
2028}
2029
2030/**
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002031 * i40e_construct_skb - Allocate skb and populate it
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002032 * @rx_ring: rx descriptor ring to transact packets on
Alexander Duyck9a064122017-03-14 10:15:23 -07002033 * @rx_buffer: rx buffer to pull data from
Björn Töpel0c8493d2017-05-24 07:55:34 +02002034 * @xdp: xdp_buff pointing to the data
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002035 *
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002036 * This function allocates an skb. It then populates it with the page
2037 * data from the current receive descriptor, taking care to set up the
2038 * skb correctly.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002039 */
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002040static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2041 struct i40e_rx_buffer *rx_buffer,
Björn Töpel0c8493d2017-05-24 07:55:34 +02002042 struct xdp_buff *xdp)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002043{
Björn Töpel0c8493d2017-05-24 07:55:34 +02002044 unsigned int size = xdp->data_end - xdp->data;
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002045#if (PAGE_SIZE < 8192)
Alexander Duyck98efd692017-04-05 07:51:01 -04002046 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002047#else
2048 unsigned int truesize = SKB_DATA_ALIGN(size);
2049#endif
2050 unsigned int headlen;
2051 struct sk_buff *skb;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002052
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002053 /* prefetch first cache line of first page */
Björn Töpel0c8493d2017-05-24 07:55:34 +02002054 prefetch(xdp->data);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002055#if L1_CACHE_BYTES < 128
Björn Töpel0c8493d2017-05-24 07:55:34 +02002056 prefetch(xdp->data + L1_CACHE_BYTES);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002057#endif
2058
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002059 /* allocate a skb to store the frags */
2060 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2061 I40E_RX_HDR_SIZE,
2062 GFP_ATOMIC | __GFP_NOWARN);
2063 if (unlikely(!skb))
2064 return NULL;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002065
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002066 /* Determine available headroom for copy */
2067 headlen = size;
2068 if (headlen > I40E_RX_HDR_SIZE)
Björn Töpel0c8493d2017-05-24 07:55:34 +02002069 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002070
2071 /* align pull length to size of long to optimize memcpy performance */
Björn Töpel0c8493d2017-05-24 07:55:34 +02002072 memcpy(__skb_put(skb, headlen), xdp->data,
2073 ALIGN(headlen, sizeof(long)));
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002074
2075 /* update all of the pointers */
2076 size -= headlen;
2077 if (size) {
2078 skb_add_rx_frag(skb, 0, rx_buffer->page,
2079 rx_buffer->page_offset + headlen,
2080 size, truesize);
2081
2082 /* buffer is used by skb, update page_offset */
2083#if (PAGE_SIZE < 8192)
2084 rx_buffer->page_offset ^= truesize;
2085#else
2086 rx_buffer->page_offset += truesize;
2087#endif
2088 } else {
2089 /* buffer is unused, reset bias back to rx_buffer */
2090 rx_buffer->pagecnt_bias++;
2091 }
Alexander Duycka0cfc312017-03-14 10:15:24 -07002092
2093 return skb;
2094}
2095
2096/**
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002097 * i40e_build_skb - Build skb around an existing buffer
2098 * @rx_ring: Rx descriptor ring to transact packets on
2099 * @rx_buffer: Rx buffer to pull data from
Björn Töpel0c8493d2017-05-24 07:55:34 +02002100 * @xdp: xdp_buff pointing to the data
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002101 *
2102 * This function builds an skb around an existing Rx buffer, taking care
2103 * to set up the skb correctly and avoid any memcpy overhead.
2104 */
2105static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2106 struct i40e_rx_buffer *rx_buffer,
Björn Töpel0c8493d2017-05-24 07:55:34 +02002107 struct xdp_buff *xdp)
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002108{
Björn Töpel0c8493d2017-05-24 07:55:34 +02002109 unsigned int size = xdp->data_end - xdp->data;
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002110#if (PAGE_SIZE < 8192)
2111 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2112#else
Björn Töpel2aae9182017-05-15 06:52:00 +02002113 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2114 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002115#endif
2116 struct sk_buff *skb;
2117
2118 /* prefetch first cache line of first page */
Björn Töpel0c8493d2017-05-24 07:55:34 +02002119 prefetch(xdp->data);
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002120#if L1_CACHE_BYTES < 128
Björn Töpel0c8493d2017-05-24 07:55:34 +02002121 prefetch(xdp->data + L1_CACHE_BYTES);
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002122#endif
2123 /* build an skb around the page buffer */
Björn Töpel0c8493d2017-05-24 07:55:34 +02002124 skb = build_skb(xdp->data_hard_start, truesize);
Alexander Duyckf8b45b72017-04-05 07:51:03 -04002125 if (unlikely(!skb))
2126 return NULL;
2127
2128 /* update pointers within the skb to store the data */
2129 skb_reserve(skb, I40E_SKB_PAD);
2130 __skb_put(skb, size);
2131
2132 /* buffer is used by skb, update page_offset */
2133#if (PAGE_SIZE < 8192)
2134 rx_buffer->page_offset ^= truesize;
2135#else
2136 rx_buffer->page_offset += truesize;
2137#endif
2138
2139 return skb;
2140}
2141
2142/**
Alexander Duycka0cfc312017-03-14 10:15:24 -07002143 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2144 * @rx_ring: rx descriptor ring to transact packets on
2145 * @rx_buffer: rx buffer to pull data from
2146 *
2147 * This function will clean up the contents of the rx_buffer. It will
Alan Brady11a350c2017-12-29 08:48:33 -05002148 * either recycle the buffer or unmap it and free the associated resources.
Alexander Duycka0cfc312017-03-14 10:15:24 -07002149 */
2150static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2151 struct i40e_rx_buffer *rx_buffer)
2152{
2153 if (i40e_can_reuse_rx_page(rx_buffer)) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002154 /* hand second half of page back to the ring */
2155 i40e_reuse_rx_page(rx_ring, rx_buffer);
2156 rx_ring->rx_stats.page_reuse_count++;
2157 } else {
2158 /* we are not reusing the buffer so unmap it */
Alexander Duyck98efd692017-04-05 07:51:01 -04002159 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2160 i40e_rx_pg_size(rx_ring),
Alexander Duyck59605bc2017-01-30 12:29:35 -08002161 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
Alexander Duyck17936682017-02-21 15:55:39 -08002162 __page_frag_cache_drain(rx_buffer->page,
2163 rx_buffer->pagecnt_bias);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002164 }
2165
2166 /* clear contents of buffer_info */
2167 rx_buffer->page = NULL;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002168}
2169
2170/**
2171 * i40e_is_non_eop - process handling of non-EOP buffers
2172 * @rx_ring: Rx ring being processed
2173 * @rx_desc: Rx descriptor for current buffer
2174 * @skb: Current socket buffer containing buffer in progress
2175 *
2176 * This function updates next to clean. If the buffer is an EOP buffer
2177 * this function exits returning false, otherwise it will place the
2178 * sk_buff in the next buffer to be chained and return true indicating
2179 * that this is in fact a non-EOP buffer.
2180 **/
2181static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2182 union i40e_rx_desc *rx_desc,
2183 struct sk_buff *skb)
2184{
2185 u32 ntc = rx_ring->next_to_clean + 1;
2186
2187 /* fetch, update, and store next to clean */
2188 ntc = (ntc < rx_ring->count) ? ntc : 0;
2189 rx_ring->next_to_clean = ntc;
2190
2191 prefetch(I40E_RX_DESC(rx_ring, ntc));
2192
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002193 /* if we are the last buffer then there is nothing else to do */
2194#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2195 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2196 return false;
2197
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002198 rx_ring->rx_stats.non_eop_descs++;
2199
2200 return true;
2201}
2202
Björn Töpel0c8493d2017-05-24 07:55:34 +02002203#define I40E_XDP_PASS 0
2204#define I40E_XDP_CONSUMED 1
Björn Töpel74608d12017-05-24 07:55:35 +02002205#define I40E_XDP_TX 2
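/* i40e_run_xdp() encodes these results in its return value as
 * ERR_PTR(-result): XDP_PASS therefore yields NULL and falls through to
 * normal skb construction, while the Rx clean loop checks
 * PTR_ERR(skb) == -I40E_XDP_TX on IS_ERR() skbs to tell an XDP transmit
 * (buffer must be flipped for reuse) apart from a drop.
 */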
2206
2207static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
2208 struct i40e_ring *xdp_ring);
Björn Töpel0c8493d2017-05-24 07:55:34 +02002209
2210/**
2211 * i40e_run_xdp - run an XDP program
2212 * @rx_ring: Rx ring being processed
2213 * @xdp: XDP buffer containing the frame
2214 **/
2215static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2216 struct xdp_buff *xdp)
2217{
2218 int result = I40E_XDP_PASS;
Björn Töpel74608d12017-05-24 07:55:35 +02002219 struct i40e_ring *xdp_ring;
Björn Töpel0c8493d2017-05-24 07:55:34 +02002220 struct bpf_prog *xdp_prog;
2221 u32 act;
2222
2223 rcu_read_lock();
2224 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2225
2226 if (!xdp_prog)
2227 goto xdp_out;
2228
2229 act = bpf_prog_run_xdp(xdp_prog, xdp);
2230 switch (act) {
2231 case XDP_PASS:
2232 break;
Björn Töpel74608d12017-05-24 07:55:35 +02002233 case XDP_TX:
2234 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2235 result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2236 break;
Björn Töpel0c8493d2017-05-24 07:55:34 +02002237 default:
2238 bpf_warn_invalid_xdp_action(act);
Björn Töpel0c8493d2017-05-24 07:55:34 +02002239 case XDP_ABORTED:
2240 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2241 /* fallthrough -- handle aborts by dropping packet */
2242 case XDP_DROP:
2243 result = I40E_XDP_CONSUMED;
2244 break;
2245 }
2246xdp_out:
2247 rcu_read_unlock();
2248 return ERR_PTR(-result);
2249}
2250
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002251/**
Björn Töpel74608d12017-05-24 07:55:35 +02002252 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2253 * @rx_ring: Rx ring
2254 * @rx_buffer: Rx buffer to adjust
2255 * @size: Size of adjustment
2256 **/
2257static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2258 struct i40e_rx_buffer *rx_buffer,
2259 unsigned int size)
2260{
2261#if (PAGE_SIZE < 8192)
2262 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2263
2264 rx_buffer->page_offset ^= truesize;
2265#else
2266 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2267
2268 rx_buffer->page_offset += truesize;
2269#endif
2270}
2271
2272/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002273 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2274 * @rx_ring: rx descriptor ring to transact packets on
2275 * @budget: Total limit on number of packets to process
2276 *
2277 * This function provides a "bounce buffer" approach to Rx interrupt
2278 * processing. The advantage to this is that on systems that have
2279 * expensive overhead for IOMMU access this provides a means of avoiding
2280 * it by maintaining the mapping of the page to the system.
2281 *
2282 * Returns amount of work completed
2283 **/
2284static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
Mitch Williamsa132af22015-01-24 09:58:35 +00002285{
2286 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Scott Petersone72e5652017-02-09 23:40:25 -08002287 struct sk_buff *skb = rx_ring->skb;
Mitch Williamsa132af22015-01-24 09:58:35 +00002288 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Björn Töpel74608d12017-05-24 07:55:35 +02002289 bool failure = false, xdp_xmit = false;
Jesper Dangaard Brouer87128822018-01-03 11:25:23 +01002290 struct xdp_buff xdp;
2291
2292 xdp.rxq = &rx_ring->xdp_rxq;
Mitch Williamsa132af22015-01-24 09:58:35 +00002293
Jesse Brandeburgb85c94b2017-06-20 15:16:59 -07002294 while (likely(total_rx_packets < (unsigned int)budget)) {
Alexander Duyck9a064122017-03-14 10:15:23 -07002295 struct i40e_rx_buffer *rx_buffer;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002296 union i40e_rx_desc *rx_desc;
Alexander Duyckd57c0e02017-03-14 10:15:22 -07002297 unsigned int size;
Mitch Williamsa132af22015-01-24 09:58:35 +00002298 u16 vlan_tag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002299 u8 rx_ptype;
2300 u64 qword;
2301
Mitch Williamsa132af22015-01-24 09:58:35 +00002302 /* return some buffers to hardware, one at a time is too slow */
2303 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08002304 failure = failure ||
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002305 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00002306 cleaned_count = 0;
2307 }
2308
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002309 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2310
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002311 /* status_error_len will always be zero for unused descriptors
2312 * because it's cleared in cleanup, and overlaps with hdr_addr
2313 * which is always zero because packet split isn't used. If the
Alexander Duyckd57c0e02017-03-14 10:15:22 -07002314 * hardware wrote DD then the length will be non-zero
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002315 */
Alexander Duyckd57c0e02017-03-14 10:15:22 -07002316 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002317
Mitch Williamsa132af22015-01-24 09:58:35 +00002318 /* This memory barrier is needed to keep us from reading
Alexander Duyckd57c0e02017-03-14 10:15:22 -07002319 * any other fields out of the rx_desc until we have
2320 * verified the descriptor has been written back.
Mitch Williamsa132af22015-01-24 09:58:35 +00002321 */
Alexander Duyck67317162015-04-08 18:49:43 -07002322 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00002323
Alexander Duyck0e626ff2017-04-10 05:18:43 -04002324 if (unlikely(i40e_rx_is_programming_status(qword))) {
2325 i40e_clean_programming_status(rx_ring, rx_desc, qword);
Alexander Duyck62b4c662017-10-21 18:12:29 -07002326 cleaned_count++;
Alexander Duyck0e626ff2017-04-10 05:18:43 -04002327 continue;
2328 }
2329 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2330 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2331 if (!size)
2332 break;
2333
Scott Petersoned0980c2017-04-13 04:45:44 -04002334 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
Alexander Duyck9a064122017-03-14 10:15:23 -07002335 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2336
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002337 /* retrieve a buffer from the ring */
Björn Töpel0c8493d2017-05-24 07:55:34 +02002338 if (!skb) {
2339 xdp.data = page_address(rx_buffer->page) +
2340 rx_buffer->page_offset;
Daniel Borkmannde8f3a82017-09-25 02:25:51 +02002341 xdp_set_data_meta_invalid(&xdp);
Björn Töpel0c8493d2017-05-24 07:55:34 +02002342 xdp.data_hard_start = xdp.data -
2343 i40e_rx_offset(rx_ring);
2344 xdp.data_end = xdp.data + size;
2345
2346 skb = i40e_run_xdp(rx_ring, &xdp);
2347 }
2348
2349 if (IS_ERR(skb)) {
Björn Töpel74608d12017-05-24 07:55:35 +02002350 if (PTR_ERR(skb) == -I40E_XDP_TX) {
2351 xdp_xmit = true;
2352 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2353 } else {
2354 rx_buffer->pagecnt_bias++;
2355 }
Björn Töpel0c8493d2017-05-24 07:55:34 +02002356 total_rx_bytes += size;
2357 total_rx_packets++;
Björn Töpel0c8493d2017-05-24 07:55:34 +02002358 } else if (skb) {
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002359 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
Björn Töpel0c8493d2017-05-24 07:55:34 +02002360 } else if (ring_uses_build_skb(rx_ring)) {
2361 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2362 } else {
2363 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2364 }
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002365
2366 /* exit if we failed to retrieve a buffer */
2367 if (!skb) {
2368 rx_ring->rx_stats.alloc_buff_failed++;
2369 rx_buffer->pagecnt_bias++;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002370 break;
Alexander Duyckfa2343e2017-03-14 10:15:25 -07002371 }
Mitch Williamsa132af22015-01-24 09:58:35 +00002372
Alexander Duycka0cfc312017-03-14 10:15:24 -07002373 i40e_put_rx_buffer(rx_ring, rx_buffer);
Mitch Williamsa132af22015-01-24 09:58:35 +00002374 cleaned_count++;
2375
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002376 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
Mitch Williamsa132af22015-01-24 09:58:35 +00002377 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00002378
Björn Töpel0c8493d2017-05-24 07:55:34 +02002379 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
Scott Petersone72e5652017-02-09 23:40:25 -08002380 skb = NULL;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002381 continue;
Scott Petersone72e5652017-02-09 23:40:25 -08002382 }
Mitch Williamsa132af22015-01-24 09:58:35 +00002383
2384 /* probably a little skewed due to removing CRC */
2385 total_rx_bytes += skb->len;
Mitch Williamsa132af22015-01-24 09:58:35 +00002386
Alexander Duyck99dad8b2016-09-27 11:28:50 -07002387 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2388 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2389 I40E_RXD_QW1_PTYPE_SHIFT;
2390
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002391 /* populate checksum, VLAN, and protocol */
2392 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00002393
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002394 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2395 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2396
Scott Petersoned0980c2017-04-13 04:45:44 -04002397 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00002398 i40e_receive_skb(rx_ring, skb, vlan_tag);
Scott Petersone72e5652017-02-09 23:40:25 -08002399 skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00002400
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002401 /* update budget accounting */
2402 total_rx_packets++;
2403 }
Mitch Williamsa132af22015-01-24 09:58:35 +00002404
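	/* XDP_TX frames were queued on the XDP ring inside the loop above;
	 * the tail doorbell is written only once here so that a whole NAPI
	 * poll worth of forwarded frames costs a single MMIO write.
	 */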
Björn Töpel74608d12017-05-24 07:55:35 +02002405 if (xdp_xmit) {
2406 struct i40e_ring *xdp_ring;
2407
2408 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2409
2410 /* Force memory writes to complete before letting h/w
2411 * know there are new descriptors to fetch.
2412 */
2413 wmb();
2414
2415 writel(xdp_ring->next_to_use, xdp_ring->tail);
2416 }
2417
Scott Petersone72e5652017-02-09 23:40:25 -08002418 rx_ring->skb = skb;
2419
Mitch Williamsa132af22015-01-24 09:58:35 +00002420 u64_stats_update_begin(&rx_ring->syncp);
2421 rx_ring->stats.packets += total_rx_packets;
2422 rx_ring->stats.bytes += total_rx_bytes;
2423 u64_stats_update_end(&rx_ring->syncp);
2424 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2425 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2426
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002427 /* guarantee a trip back through this routine if there was a failure */
Jesse Brandeburgb85c94b2017-06-20 15:16:59 -07002428 return failure ? budget : (int)total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002429}
2430
Alexander Duyck92418fb2017-12-29 08:51:08 -05002431static inline u32 i40e_buildreg_itr(const int type, u16 itr)
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002432{
2433 u32 val;
2434
Alexander Duyck4ff17922017-12-29 08:50:55 -05002435 /* We don't bother with setting the CLEARPBA bit as the data sheet
2436 * points out doing so is "meaningless since it was already
2437 * auto-cleared". The auto-clearing happens when the interrupt is
2438 * asserted.
2439 *
2440 * Hardware errata 28 also indicates that writing to a
2441 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2442 * an event in the PBA anyway so we need to rely on the automask
2443 * to hold pending events for us until the interrupt is re-enabled
Alexander Duyck92418fb2017-12-29 08:51:08 -05002444 *
2445 * The itr value is reported in microseconds, and the register
2446 * value is recorded in 2 microsecond units. For this reason we
2447 * only need to shift by the interval shift - 1 instead of the
2448 * full value.
Alexander Duyck4ff17922017-12-29 08:50:55 -05002449 */
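	/* Worked example (assuming the interval field of the register starts
	 * at bit 5, i.e. I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT == 5): an ITR of
	 * 50 usecs must be programmed as 25 two-usec units at bit 5, and
	 * 25 << 5 is exactly 50 << (5 - 1).
	 */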
Alexander Duyck92418fb2017-12-29 08:51:08 -05002450 itr &= I40E_ITR_MASK;
2451
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002452 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002453 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
Alexander Duyck92418fb2017-12-29 08:51:08 -05002454 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002455
2456 return val;
2457}
2458
2459/* a small macro to shorten up some long lines */
2460#define INTREG I40E_PFINT_DYN_CTLN
2461
Alexander Duycka0073a42017-12-29 08:52:19 -05002462/* The act of updating the ITR will cause it to immediately trigger. In order
2463 * to prevent this from throwing off adaptive update statistics we defer the
2464 * update so that it can only happen so often. So after either Tx or Rx are
2465 * updated we make the adaptive scheme wait until either the ITR completely
2466 * expires via the next_update expiration or we have been through at least
2467 * 3 interrupts.
2468 */
2469#define ITR_COUNTDOWN_START 3
2470
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002471/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002472 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2473 * @vsi: the VSI we care about
2474 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2475 *
2476 **/
2477static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2478 struct i40e_q_vector *q_vector)
2479{
2480 struct i40e_hw *hw = &vsi->back->hw;
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002481 u32 intval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002482
Jacob Keller9254c0e2017-07-14 09:10:09 -04002483 /* If we don't have MSIX, then we only need to re-enable icr0 */
2484 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
Jacob Kellerdbadbbe2017-09-07 08:05:49 -04002485 i40e_irq_dynamic_enable_icr0(vsi->back);
Jacob Keller9254c0e2017-07-14 09:10:09 -04002486 return;
2487 }
2488
Alexander Duycka0073a42017-12-29 08:52:19 -05002489 /* These will do nothing if dynamic updates are not enabled */
2490 i40e_update_itr(q_vector, &q_vector->tx);
2491 i40e_update_itr(q_vector, &q_vector->rx);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04002492
Alexander Duycka0073a42017-12-29 08:52:19 -05002493 /* This block of logic allows us to get away with only updating
2494 * one ITR value with each interrupt. The idea is to perform a
2495 * pseudo-lazy update with the following criteria.
2496 *
2497 * 1. Rx is given higher priority than Tx if both are in same state
2498	 * 2. If we must reduce an ITR, that reduction is given highest priority.
2499 * 3. We then give priority to increasing ITR based on amount.
2500 */
2501 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2502 /* Rx ITR needs to be reduced, this is highest priority */
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002503 intval = i40e_buildreg_itr(I40E_RX_ITR,
2504 q_vector->rx.target_itr);
2505 q_vector->rx.current_itr = q_vector->rx.target_itr;
Alexander Duycka0073a42017-12-29 08:52:19 -05002506 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2507 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2508 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2509 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2510 /* Tx ITR needs to be reduced, this is second priority
2511 * Tx ITR needs to be increased more than Rx, fourth priority
2512 */
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002513 intval = i40e_buildreg_itr(I40E_TX_ITR,
2514 q_vector->tx.target_itr);
2515 q_vector->tx.current_itr = q_vector->tx.target_itr;
Alexander Duycka0073a42017-12-29 08:52:19 -05002516 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2517 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2518 /* Rx ITR needs to be increased, third priority */
2519 intval = i40e_buildreg_itr(I40E_RX_ITR,
2520 q_vector->rx.target_itr);
2521 q_vector->rx.current_itr = q_vector->rx.target_itr;
2522 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002523 } else {
Alexander Duycka0073a42017-12-29 08:52:19 -05002524 /* No ITR update, lowest priority */
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002525 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
Alexander Duycka0073a42017-12-29 08:52:19 -05002526 if (q_vector->itr_countdown)
2527 q_vector->itr_countdown--;
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002528 }
2529
Jacob Keller0da36b92017-04-19 09:25:55 -04002530 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
Alexander Duyck556fdfd2017-12-29 08:51:25 -05002531 wr32(hw, INTREG(q_vector->reg_idx), intval);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002532}
2533
2534/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002535 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2536 * @napi: napi struct with our device's info in it
2537 * @budget: amount of work driver is allowed to do this pass, in packets
2538 *
2539 * This function will clean all queues associated with a q_vector.
2540 *
2541 * Returns the amount of work done
2542 **/
2543int i40e_napi_poll(struct napi_struct *napi, int budget)
2544{
2545 struct i40e_q_vector *q_vector =
2546 container_of(napi, struct i40e_q_vector, napi);
2547 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002548 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002549 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002550 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002551 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002552 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002553
Jacob Keller0da36b92017-04-19 09:25:55 -04002554 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002555 napi_complete(napi);
2556 return 0;
2557 }
2558
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002559 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002560 * budget and be more aggressive about cleaning up the Tx descriptors.
2561 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002562 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08002563 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08002564 clean_complete = false;
2565 continue;
2566 }
2567 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04002568 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002569 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002570
Alexander Duyckc67cace2015-09-24 09:04:26 -07002571 /* Handle case where we are called by netpoll with a budget of 0 */
2572 if (budget <= 0)
2573 goto tx_only;
2574
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002575 /* We attempt to distribute budget to each Rx queue fairly, but don't
2576 * allow the budget to go below 1 because that would exit polling early.
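	 *
	 * For example, with the default NAPI budget of 64 and 4 ring pairs,
	 * each Rx ring gets a per-poll budget of 16 packets.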
2577 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002578 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002579
Mitch Williamsa132af22015-01-24 09:58:35 +00002580 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002581 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002582
2583 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08002584 /* if we clean as many as budgeted, we must not be done */
2585 if (cleaned >= budget_per_ring)
2586 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00002587 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002588
2589 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002590 if (!clean_complete) {
Alan Brady96db7762016-09-14 16:24:38 -07002591 int cpu_id = smp_processor_id();
2592
2593 /* It is possible that the interrupt affinity has changed but,
2594 * if the cpu is pegged at 100%, polling will never exit while
2595 * traffic continues and the interrupt will be stuck on this
2596 * cpu. We check to make sure affinity is correct before we
2597 * continue to poll, otherwise we must stop polling so the
2598 * interrupt can move to the correct cpu.
2599 */
Jacob Keller6d977722017-07-14 09:10:11 -04002600 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2601 /* Tell napi that we are done polling */
2602 napi_complete_done(napi, work_done);
2603
2604 /* Force an interrupt */
2605 i40e_force_wb(vsi, q_vector);
2606
2607 /* Return budget-1 so that polling stops */
2608 return budget - 1;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002609 }
Jacob Keller6d977722017-07-14 09:10:11 -04002610tx_only:
2611 if (arm_wb) {
2612 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2613 i40e_enable_wb_on_itr(vsi, q_vector);
2614 }
2615 return budget;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002616 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002617
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002618 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2619 q_vector->arm_wb_state = false;
2620
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002621 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002622 napi_complete_done(napi, work_done);
Alan Brady96db7762016-09-14 16:24:38 -07002623
Jacob Keller6d977722017-07-14 09:10:11 -04002624 i40e_update_enable_itr(vsi, q_vector);
Alan Brady96db7762016-09-14 16:24:38 -07002625
Alexander Duyck6beb84a2016-11-08 13:05:16 -08002626 return min(work_done, budget - 1);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002627}
2628
2629/**
2630 * i40e_atr - Add a Flow Director ATR filter
2631 * @tx_ring: ring to add programming descriptor to
2632 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002633 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002634 **/
2635static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002636 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002637{
2638 struct i40e_filter_program_desc *fdir_desc;
2639 struct i40e_pf *pf = tx_ring->vsi->back;
2640 union {
2641 unsigned char *network;
2642 struct iphdr *ipv4;
2643 struct ipv6hdr *ipv6;
2644 } hdr;
2645 struct tcphdr *th;
2646 unsigned int hlen;
2647 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002648 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002649 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002650
2651 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002652 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002653 return;
2654
Jacob Keller134201a2018-03-16 01:26:32 -07002655 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002656 return;
2657
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002658 /* if sampling is disabled do nothing */
2659 if (!tx_ring->atr_sample_rate)
2660 return;
2661
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002662 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002663 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002664 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002665
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002666 /* snag network header to get L4 type and address */
2667 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2668 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002669
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002670 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002671 * tx_enable_csum function if encap is enabled.
2672 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002673 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2674 /* access ihl as u8 to avoid unaligned access on ia64 */
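		/* IHL counts 4-byte words, so a standard 20 byte IPv4
		 * header without options yields hlen = 20
		 */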
2675 hlen = (hdr.network[0] & 0x0F) << 2;
2676 l4_proto = hdr.ipv4->protocol;
2677 } else {
Jesse Brandeburg601a2e72017-06-20 15:16:58 -07002678 /* find the start of the innermost ipv6 header */
2679 unsigned int inner_hlen = hdr.network - skb->data;
2680 unsigned int h_offset = inner_hlen;
2681
2682 /* this function updates h_offset to the end of the header */
2683 l4_proto =
2684 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2685		/* hlen will contain our best estimate of the offset to the tcp header */
2686 hlen = h_offset - inner_hlen;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002687 }
2688
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002689 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002690 return;
2691
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002692 th = (struct tcphdr *)(hdr.network + hlen);
2693
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002694 /* Due to lack of space, no more new filters can be programmed */
Jacob Keller134201a2018-03-16 01:26:32 -07002695 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002696 return;
Jacob Keller6964e532017-06-12 15:38:36 -07002697 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002698 /* HW ATR eviction will take care of removing filters on FIN
2699 * and RST packets.
2700 */
2701 if (th->fin || th->rst)
2702 return;
2703 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002704
2705 tx_ring->atr_count++;
2706
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002707 /* sample on all syn/fin/rst packets or once every atr sample rate */
2708 if (!th->fin &&
2709 !th->syn &&
2710 !th->rst &&
2711 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002712 return;
2713
2714 tx_ring->atr_count = 0;
2715
2716 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002717 i = tx_ring->next_to_use;
2718 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2719
2720 i++;
2721 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002722
2723 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2724 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002725 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002726 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2727 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2728 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2729 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2730
2731 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2732
2733 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2734
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002735 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002736 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2737 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2738 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2739 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2740
2741 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2742 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2743
2744 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2745 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2746
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002747 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002748 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002749 dtype_cmd |=
2750 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2751 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2752 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2753 else
2754 dtype_cmd |=
2755 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2756 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2757 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002758
Jacob Keller6964e532017-06-12 15:38:36 -07002759 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002760 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2761
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002762 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002763 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002764 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002765 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002766}
2767
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002768/**
2769 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2770 * @skb: send buffer
2771 * @tx_ring: ring to send buffer on
2772 * @flags: the tx flags to be set
2773 *
2774 * Checks the skb and sets up the corresponding generic transmit flags
2775 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2776 *
2777 * Returns an error code to indicate the frame should be dropped upon error,
2778 * otherwise returns 0 to indicate the flags have been set properly.
2779 **/
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002780static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2781 struct i40e_ring *tx_ring,
2782 u32 *flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002783{
2784 __be16 protocol = skb->protocol;
2785 u32 tx_flags = 0;
2786
Greg Rose31eaacc2015-03-31 00:45:03 -07002787 if (protocol == htons(ETH_P_8021Q) &&
2788 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2789 /* When HW VLAN acceleration is turned off by the user the
2790 * stack sets the protocol to 8021q so that the driver
2791 * can take any steps required to support the SW only
2792 * VLAN handling. In our case the driver doesn't need
2793 * to take any further steps so just set the protocol
2794 * to the encapsulated ethertype.
2795 */
2796 skb->protocol = vlan_get_protocol(skb);
2797 goto out;
2798 }
2799
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002800 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002801 if (skb_vlan_tag_present(skb)) {
2802 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002803 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2804 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002805 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002806 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002807
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002808 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2809 if (!vhdr)
2810 return -EINVAL;
2811
2812 protocol = vhdr->h_vlan_encapsulated_proto;
2813 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2814 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2815 }
2816
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002817 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2818 goto out;
2819
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002820 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002821 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2822 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002823 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2824 tx_flags |= (skb->priority & 0x7) <<
2825 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2826 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2827 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002828 int rc;
2829
2830 rc = skb_cow_head(skb, 0);
2831 if (rc < 0)
2832 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002833 vhdr = (struct vlan_ethhdr *)skb->data;
2834 vhdr->h_vlan_TCI = htons(tx_flags >>
2835 I40E_TX_FLAGS_VLAN_SHIFT);
2836 } else {
2837 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2838 }
2839 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002840
2841out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002842 *flags = tx_flags;
2843 return 0;
2844}
2845
2846/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002847 * i40e_tso - set up the tso context descriptor
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002848 * @first: pointer to first Tx buffer for xmit
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002849 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002850 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002851 *
2852 * Returns 0 if no TSO can happen, 1 if TSO is going to happen, or a negative error
2853 **/
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002854static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2855 u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002856{
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002857 struct sk_buff *skb = first->skb;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002858 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002859 union {
2860 struct iphdr *v4;
2861 struct ipv6hdr *v6;
2862 unsigned char *hdr;
2863 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002864 union {
2865 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002866 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002867 unsigned char *hdr;
2868 } l4;
2869 u32 paylen, l4_offset;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002870 u16 gso_segs, gso_size;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002871 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002872
Shannon Nelsone9f65632016-01-04 10:33:04 -08002873 if (skb->ip_summed != CHECKSUM_PARTIAL)
2874 return 0;
2875
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002876 if (!skb_is_gso(skb))
2877 return 0;
2878
Francois Romieudd225bc2014-03-30 03:14:48 +00002879 err = skb_cow_head(skb, 0);
2880 if (err < 0)
2881 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002882
Alexander Duyckc7770192016-01-24 21:16:35 -08002883 ip.hdr = skb_network_header(skb);
2884 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002885
Alexander Duyckc7770192016-01-24 21:16:35 -08002886 /* initialize outer IP header fields */
2887 if (ip.v4->version == 4) {
2888 ip.v4->tot_len = 0;
2889 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002890 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002891 ip.v6->payload_len = 0;
2892 }
2893
Alexander Duyck577389a2016-04-02 00:06:56 -07002894 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002895 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002896 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002897 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07002898 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002899 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002900 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2901 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2902 l4.udp->len = 0;
2903
Alexander Duyck54532052016-01-24 21:17:29 -08002904 /* determine offset of outer transport header */
2905 l4_offset = l4.hdr - skb->data;
2906
2907 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002908 paylen = skb->len - l4_offset;
Jacob Kellerb9c015d2016-12-12 15:44:17 -08002909 csum_replace_by_diff(&l4.udp->check,
2910 (__force __wsum)htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002911 }
2912
Alexander Duyckc7770192016-01-24 21:16:35 -08002913 /* reset pointers to inner headers */
2914 ip.hdr = skb_inner_network_header(skb);
2915 l4.hdr = skb_inner_transport_header(skb);
2916
2917 /* initialize inner IP header fields */
2918 if (ip.v4->version == 4) {
2919 ip.v4->tot_len = 0;
2920 ip.v4->check = 0;
2921 } else {
2922 ip.v6->payload_len = 0;
2923 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002924 }
2925
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002926 /* determine offset of inner transport header */
2927 l4_offset = l4.hdr - skb->data;
2928
2929 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002930 paylen = skb->len - l4_offset;
Jacob Kellerb9c015d2016-12-12 15:44:17 -08002931 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002932
2933 /* compute length of segmentation header */
2934 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002935
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002936 /* pull values out of skb_shinfo */
2937 gso_size = skb_shinfo(skb)->gso_size;
2938 gso_segs = skb_shinfo(skb)->gso_segs;
2939
2940 /* update GSO size and bytecount with header size */
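	/* the skb byte count includes the headers only once; every
	 * additional segment carries its own copy of the headers, hence
	 * the (gso_segs - 1) * hdr_len adjustment below
	 */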
2941 first->gso_segs = gso_segs;
2942 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2943
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002944 /* find the field values */
2945 cd_cmd = I40E_TX_CTX_DESC_TSO;
2946 cd_tso_len = skb->len - *hdr_len;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002947 cd_mss = gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002948 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2949 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2950 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002951 return 1;
2952}
2953
2954/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002955 * i40e_tsyn - set up the tsyn context descriptor
2956 * @tx_ring: ptr to the ring to send
2957 * @skb: ptr to the skb we're sending
2958 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002959 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002960 *
2961 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2962 **/
2963static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2964 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2965{
2966 struct i40e_pf *pf;
2967
2968 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2969 return 0;
2970
2971 /* Tx timestamps cannot be sampled when doing TSO */
2972 if (tx_flags & I40E_TX_FLAGS_TSO)
2973 return 0;
2974
2975 /* only timestamp the outbound packet if the user has requested it and
2976 * we are not already transmitting a packet to be timestamped
2977 */
2978 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002979 if (!(pf->flags & I40E_FLAG_PTP))
2980 return 0;
2981
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002982 if (pf->ptp_tx &&
Jacob Keller0da36b92017-04-19 09:25:55 -04002983 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002984 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
Jacob Keller0bc07062017-05-03 10:29:02 -07002985 pf->ptp_tx_start = jiffies;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002986 pf->ptp_tx_skb = skb_get(skb);
2987 } else {
Jacob Keller2955fac2017-05-03 10:28:58 -07002988 pf->tx_hwtstamp_skipped++;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002989 return 0;
2990 }
2991
2992 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2993 I40E_TXD_CTX_QW1_CMD_SHIFT;
2994
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002995 return 1;
2996}
2997
2998/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002999 * i40e_tx_enable_csum - Enable Tx checksum offloads
3000 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04003001 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003002 * @td_cmd: Tx descriptor command bits to set
3003 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06003004 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003005 * @cd_tunneling: ptr to context desc bits
3006 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08003007static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3008 u32 *td_cmd, u32 *td_offset,
3009 struct i40e_ring *tx_ring,
3010 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003011{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003012 union {
3013 struct iphdr *v4;
3014 struct ipv6hdr *v6;
3015 unsigned char *hdr;
3016 } ip;
3017 union {
3018 struct tcphdr *tcp;
3019 struct udphdr *udp;
3020 unsigned char *hdr;
3021 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08003022 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07003023 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08003024 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003025 u8 l4_proto = 0;
3026
Alexander Duyck529f1f62016-01-24 21:17:10 -08003027 if (skb->ip_summed != CHECKSUM_PARTIAL)
3028 return 0;
3029
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003030 ip.hdr = skb_network_header(skb);
3031 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003032
Alexander Duyck475b4202016-01-24 21:17:01 -08003033 /* compute outer L2 header size */
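	/* MACLEN is recorded in 2-byte words, e.g. an untagged 14 byte
	 * Ethernet header is written as 7
	 */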
3034 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3035
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003036 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07003037 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08003038 /* define outer network header type */
3039 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08003040 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3041 I40E_TX_CTX_EXT_IP_IPV4 :
3042 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3043
Alexander Duycka0064722016-01-24 21:16:48 -08003044 l4_proto = ip.v4->protocol;
3045 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08003046 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08003047
3048 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08003049 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08003050 if (l4.hdr != exthdr)
3051 ipv6_skip_exthdr(skb, exthdr - skb->data,
3052 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08003053 }
3054
3055 /* define outer transport */
3056 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00003057 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08003058 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08003059 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00003060 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00003061 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08003062 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08003063 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00003064 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07003065 case IPPROTO_IPIP:
3066 case IPPROTO_IPV6:
3067 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3068 l4.hdr = skb_inner_network_header(skb);
3069 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00003070 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08003071 if (*tx_flags & I40E_TX_FLAGS_TSO)
3072 return -1;
3073
3074 skb_checksum_help(skb);
3075 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00003076 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003077
Alexander Duyck577389a2016-04-02 00:06:56 -07003078 /* compute outer L3 header size */
3079 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3080 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3081
3082 /* switch IP header pointer from outer to inner header */
3083 ip.hdr = skb_inner_network_header(skb);
3084
Alexander Duyck475b4202016-01-24 21:17:01 -08003085 /* compute tunnel header size */
3086 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3087 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3088
Alexander Duyck54532052016-01-24 21:17:29 -08003089 /* indicate if we need to offload outer UDP header */
3090 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04003091 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08003092 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3093 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3094
Alexander Duyck475b4202016-01-24 21:17:01 -08003095 /* record tunnel offload values */
3096 *cd_tunneling |= tunnel;
3097
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003098 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003099 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08003100 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003101
Alexander Duycka0064722016-01-24 21:16:48 -08003102 /* reset type as we transition from outer to inner headers */
3103 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3104 if (ip.v4->version == 4)
3105 *tx_flags |= I40E_TX_FLAGS_IPV4;
3106 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04003107 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003108 }
3109
3110 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04003111 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003112 l4_proto = ip.v4->protocol;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003113 /* the stack computes the IP header already, the only time we
3114 * need the hardware to recompute it is in the case of TSO.
3115 */
Alexander Duyck475b4202016-01-24 21:17:01 -08003116 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3117 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3118 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04003119 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08003120 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08003121
3122 exthdr = ip.hdr + sizeof(*ip.v6);
3123 l4_proto = ip.v6->nexthdr;
3124 if (l4.hdr != exthdr)
3125 ipv6_skip_exthdr(skb, exthdr - skb->data,
3126 &l4_proto, &frag_off);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003127 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003128
Alexander Duyck475b4202016-01-24 21:17:01 -08003129 /* compute inner L3 header size */
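	/* IPLEN is recorded in 4-byte words, e.g. a 20 byte IPv4 header
	 * without options is written as 5
	 */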
3130 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003131
3132 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08003133 switch (l4_proto) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003134 case IPPROTO_TCP:
3135 /* enable checksum offloads */
Alexander Duyck475b4202016-01-24 21:17:01 -08003136 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3137 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003138 break;
3139 case IPPROTO_SCTP:
3140 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08003141 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3142 offset |= (sizeof(struct sctphdr) >> 2) <<
3143 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003144 break;
3145 case IPPROTO_UDP:
3146 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08003147 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3148 offset |= (sizeof(struct udphdr) >> 2) <<
3149 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003150 break;
3151 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08003152 if (*tx_flags & I40E_TX_FLAGS_TSO)
3153 return -1;
3154 skb_checksum_help(skb);
3155 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003156 }
Alexander Duyck475b4202016-01-24 21:17:01 -08003157
3158 *td_cmd |= cmd;
3159 *td_offset |= offset;
Alexander Duyck529f1f62016-01-24 21:17:10 -08003160
3161 return 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003162}
3163
3164/**
3165 * i40e_create_tx_ctx - Build the Tx context descriptor
3166 * @tx_ring: ring to create the descriptor on
3167 * @cd_type_cmd_tso_mss: Quad Word 1
3168 * @cd_tunneling: Quad Word 0 - bits 0-31
3169 * @cd_l2tag2: Quad Word 0 - bits 32-63
3170 **/
3171static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3172 const u64 cd_type_cmd_tso_mss,
3173 const u32 cd_tunneling, const u32 cd_l2tag2)
3174{
3175 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00003176 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003177
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00003178 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3179 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003180 return;
3181
3182 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00003183 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3184
3185 i++;
3186 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003187
3188 /* cpu_to_le32 and assign to struct fields */
3189 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3190 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00003191 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003192 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3193}
3194
3195/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07003196 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3197 * @tx_ring: the ring to be checked
3198 * @size: the size buffer we want to assure is available
3199 *
3200 * Returns -EBUSY if a stop is needed, else 0
3201 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003202int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07003203{
3204 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3205 /* Memory barrier before checking head and tail */
3206 smp_mb();
3207
3208 /* Check again in a case another CPU has just made room available. */
3209 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3210 return -EBUSY;
3211
3212 /* A reprieve! - use start_queue because it doesn't call schedule */
3213 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3214 ++tx_ring->tx_stats.restart_queue;
3215 return 0;
3216}
3217
3218/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003219 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00003220 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00003221 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003222 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3223 * and so we need to figure out the cases where we need to linearize the skb.
3224 *
3225 * For TSO we need to count the TSO header and segment payload separately.
3226 * As such we need to check cases where we have 7 fragments or more as we
3227 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3228 * the segment payload in the first descriptor, and another 7 for the
3229 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00003230 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08003231bool __i40e_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00003232{
Alexander Duyck2d374902016-02-17 11:02:50 -08003233 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003234 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00003235
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003236 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08003237 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003238 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08003239 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00003240
Alexander Duyck2d374902016-02-17 11:02:50 -08003241 /* We need to walk through the list and validate that each group
Alexander Duyck841493a2016-09-06 18:05:04 -07003242 * of 6 fragments totals at least gso_size.
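	 * For example, with a gso_size of 1448 any run of 6 consecutive
	 * fragments must supply roughly 1448 bytes, otherwise we fall back
	 * to linearizing the skb.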
Alexander Duyck2d374902016-02-17 11:02:50 -08003243 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003244 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08003245 frag = &skb_shinfo(skb)->frags[0];
3246
3247 /* Initialize size to the negative value of gso_size minus 1. We
3248	 * use this as the worst case scenario in which the frag ahead
3249 * of us only provides one byte which is why we are limited to 6
3250 * descriptors for a single transmit as the header and previous
3251 * fragment are already consuming 2 descriptors.
3252 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003253 sum = 1 - skb_shinfo(skb)->gso_size;
Alexander Duyck2d374902016-02-17 11:02:50 -08003254
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003255 /* Add size of frags 0 through 4 to create our initial sum */
3256 sum += skb_frag_size(frag++);
3257 sum += skb_frag_size(frag++);
3258 sum += skb_frag_size(frag++);
3259 sum += skb_frag_size(frag++);
3260 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08003261
3262 /* Walk through fragments adding latest fragment, testing it, and
3263 * then removing stale fragments from the sum.
3264 */
Alexander Duyck248de222017-12-08 10:55:04 -08003265 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3266 int stale_size = skb_frag_size(stale);
3267
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07003268 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08003269
Alexander Duyck248de222017-12-08 10:55:04 -08003270 /* The stale fragment may present us with a smaller
3271 * descriptor than the actual fragment size. To account
3272 * for that we need to remove all the data on the front and
3273 * figure out what the remainder would be in the last
3274 * descriptor associated with the fragment.
3275 */
3276 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3277 int align_pad = -(stale->page_offset) &
3278 (I40E_MAX_READ_REQ_SIZE - 1);
3279
3280 sum -= align_pad;
3281 stale_size -= align_pad;
3282
3283 do {
3284 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3285 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3286 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3287 }
3288
Alexander Duyck2d374902016-02-17 11:02:50 -08003289 /* if sum is negative we failed to make sufficient progress */
3290 if (sum < 0)
3291 return true;
3292
Alexander Duyck841493a2016-09-06 18:05:04 -07003293 if (!nr_frags--)
Alexander Duyck2d374902016-02-17 11:02:50 -08003294 break;
3295
Alexander Duyck248de222017-12-08 10:55:04 -08003296 sum -= stale_size;
Anjali Singhai71da6192015-02-21 06:42:35 +00003297 }
3298
Alexander Duyck2d374902016-02-17 11:02:50 -08003299 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00003300}
3301
3302/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003303 * i40e_tx_map - Build the Tx descriptor
3304 * @tx_ring: ring to send buffer on
3305 * @skb: send buffer
3306 * @first: first buffer info buffer to use
3307 * @tx_flags: collected send information
3308 * @hdr_len: size of the packet header
3309 * @td_cmd: the command field in the descriptor
3310 * @td_offset: offset for checksum or crc
Jacob Keller69077572017-05-03 10:28:54 -07003311 *
3312 * Returns 0 on success, -1 on failure to DMA
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003313 **/
Jacob Keller69077572017-05-03 10:28:54 -07003314static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3315 struct i40e_tx_buffer *first, u32 tx_flags,
3316 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003317{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003318 unsigned int data_len = skb->data_len;
3319 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00003320 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003321 struct i40e_tx_buffer *tx_bi;
3322 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00003323 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003324 u32 td_tag = 0;
3325 dma_addr_t dma;
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003326 u16 desc_count = 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003327
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003328 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3329 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3330 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3331 I40E_TX_FLAGS_VLAN_SHIFT;
3332 }
3333
Alexander Duycka5e9c572013-09-28 06:00:27 +00003334 first->tx_flags = tx_flags;
3335
3336 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3337
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003338 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00003339 tx_bi = first;
3340
3341 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003342 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3343
Alexander Duycka5e9c572013-09-28 06:00:27 +00003344 if (dma_mapping_error(tx_ring->dev, dma))
3345 goto dma_error;
3346
3347 /* record length, and DMA address */
3348 dma_unmap_len_set(tx_bi, len, size);
3349 dma_unmap_addr_set(tx_bi, dma, dma);
3350
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003351 /* align size to end of page */
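		/* the added term is the distance from dma to the next
		 * read-request-size boundary, so every chunk after the
		 * first starts on an aligned address
		 */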
3352 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
Alexander Duycka5e9c572013-09-28 06:00:27 +00003353 tx_desc->buffer_addr = cpu_to_le64(dma);
3354
3355 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003356 tx_desc->cmd_type_offset_bsz =
3357 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003358 max_data, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003359
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003360 tx_desc++;
3361 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07003362 desc_count++;
3363
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003364 if (i == tx_ring->count) {
3365 tx_desc = I40E_TX_DESC(tx_ring, 0);
3366 i = 0;
3367 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00003368
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003369 dma += max_data;
3370 size -= max_data;
Alexander Duycka5e9c572013-09-28 06:00:27 +00003371
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003372 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Alexander Duycka5e9c572013-09-28 06:00:27 +00003373 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003374 }
3375
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003376 if (likely(!data_len))
3377 break;
3378
Alexander Duycka5e9c572013-09-28 06:00:27 +00003379 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3380 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003381
3382 tx_desc++;
3383 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07003384 desc_count++;
3385
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003386 if (i == tx_ring->count) {
3387 tx_desc = I40E_TX_DESC(tx_ring, 0);
3388 i = 0;
3389 }
3390
Alexander Duycka5e9c572013-09-28 06:00:27 +00003391 size = skb_frag_size(frag);
3392 data_len -= size;
3393
3394 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3395 DMA_TO_DEVICE);
3396
3397 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003398 }
3399
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003400 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
Alexander Duycka5e9c572013-09-28 06:00:27 +00003401
3402 i++;
3403 if (i == tx_ring->count)
3404 i = 0;
3405
3406 tx_ring->next_to_use = i;
3407
Eric Dumazet4567dc12014-10-07 13:30:23 -07003408 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07003409
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003410 /* write last descriptor with EOP bit */
3411 td_cmd |= I40E_TX_DESC_CMD_EOP;
3412
Jacob Kellera5340d92017-08-29 05:32:42 -04003413 /* We OR these values together to check both against 4 (WB_STRIDE)
3414 * below. This is safe since we don't re-use desc_count afterwards.
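	 * The effect is to request a write-back once a packet needs
	 * WB_STRIDE or more descriptors, or once WB_STRIDE packets have
	 * been sent since the last write-back request.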
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003415 */
3416 desc_count |= ++tx_ring->packet_stride;
3417
Jacob Kellera5340d92017-08-29 05:32:42 -04003418 if (desc_count >= WB_STRIDE) {
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003419 /* write last descriptor with RS bit set */
3420 td_cmd |= I40E_TX_DESC_CMD_RS;
Anjali Singhai58044742015-09-25 18:26:13 -07003421 tx_ring->packet_stride = 0;
Anjali Singhai58044742015-09-25 18:26:13 -07003422 }
Anjali Singhai58044742015-09-25 18:26:13 -07003423
3424 tx_desc->cmd_type_offset_bsz =
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003425 build_ctob(td_cmd, td_offset, size, td_tag);
3426
3427 /* Force memory writes to complete before letting h/w know there
3428 * are new descriptors to fetch.
3429 *
3430 * We also use this memory barrier to make certain all of the
3431 * status bits have been updated before next_to_watch is written.
3432 */
3433 wmb();
3434
3435 /* set next_to_watch value indicating a packet is present */
3436 first->next_to_watch = tx_desc;
Anjali Singhai58044742015-09-25 18:26:13 -07003437
Alexander Duycka5e9c572013-09-28 06:00:27 +00003438 /* notify HW of packet */
Jacob Kellera5340d92017-08-29 05:32:42 -04003439 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
Anjali Singhai58044742015-09-25 18:26:13 -07003440 writel(i, tx_ring->tail);
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003441
3442		/* we need this if more than one processor can write to our tail
3443		 * at a time; it synchronizes IO on IA64/Altix systems
3444 */
3445 mmiowb();
Anjali Singhai58044742015-09-25 18:26:13 -07003446 }
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003447
Jacob Keller69077572017-05-03 10:28:54 -07003448 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003449
3450dma_error:
Alexander Duycka5e9c572013-09-28 06:00:27 +00003451 dev_info(tx_ring->dev, "TX DMA map failed\n");
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003452
3453 /* clear dma mappings for failed tx_bi map */
3454 for (;;) {
3455 tx_bi = &tx_ring->tx_bi[i];
Alexander Duycka5e9c572013-09-28 06:00:27 +00003456 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003457 if (tx_bi == first)
3458 break;
3459 if (i == 0)
3460 i = tx_ring->count;
3461 i--;
3462 }
3463
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003464 tx_ring->next_to_use = i;
Jacob Keller69077572017-05-03 10:28:54 -07003465
3466 return -1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003467}
3468
3469/**
Björn Töpel74608d12017-05-24 07:55:35 +02003470 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3471 * @xdp: data to transmit
3472 * @xdp_ring: XDP Tx ring
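 *
 * Returns I40E_XDP_TX if the buffer was queued for transmit, or
 * I40E_XDP_CONSUMED if it had to be dropped (no free descriptors or a
 * DMA mapping failure).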
3473 **/
3474static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3475 struct i40e_ring *xdp_ring)
3476{
3477 u32 size = xdp->data_end - xdp->data;
3478 u16 i = xdp_ring->next_to_use;
3479 struct i40e_tx_buffer *tx_bi;
3480 struct i40e_tx_desc *tx_desc;
3481 dma_addr_t dma;
3482
3483 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
3484 xdp_ring->tx_stats.tx_busy++;
3485 return I40E_XDP_CONSUMED;
3486 }
3487
3488 dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
3489 if (dma_mapping_error(xdp_ring->dev, dma))
3490 return I40E_XDP_CONSUMED;
3491
3492 tx_bi = &xdp_ring->tx_bi[i];
3493 tx_bi->bytecount = size;
3494 tx_bi->gso_segs = 1;
3495 tx_bi->raw_buf = xdp->data;
3496
3497 /* record length, and DMA address */
3498 dma_unmap_len_set(tx_bi, len, size);
3499 dma_unmap_addr_set(tx_bi, dma, dma);
3500
3501 tx_desc = I40E_TX_DESC(xdp_ring, i);
3502 tx_desc->buffer_addr = cpu_to_le64(dma);
3503 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3504 | I40E_TXD_CMD,
3505 0, size, 0);
3506
3507 /* Make certain all of the status bits have been updated
3508 * before next_to_watch is written.
3509 */
3510 smp_wmb();
3511
3512 i++;
3513 if (i == xdp_ring->count)
3514 i = 0;
3515
3516 tx_bi->next_to_watch = tx_desc;
3517 xdp_ring->next_to_use = i;
3518
3519 return I40E_XDP_TX;
3520}
3521
3522/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003523 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3524 * @skb: send buffer
3525 * @tx_ring: ring to send buffer on
3526 *
3527 * Returns NETDEV_TX_OK if sent, else an error code
3528 **/
3529static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3530 struct i40e_ring *tx_ring)
3531{
3532 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3533 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3534 struct i40e_tx_buffer *first;
3535 u32 td_offset = 0;
3536 u32 tx_flags = 0;
3537 __be16 protocol;
3538 u32 td_cmd = 0;
3539 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003540 int tso, count;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003541 int tsyn;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04003542
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04003543 /* prefetch the data, we'll need it later */
3544 prefetch(skb->data);
3545
Scott Petersoned0980c2017-04-13 04:45:44 -04003546 i40e_trace(xmit_frame_ring, skb, tx_ring);
3547
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003548 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08003549 if (i40e_chk_linearize(skb, count)) {
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003550 if (__skb_linearize(skb)) {
3551 dev_kfree_skb_any(skb);
3552 return NETDEV_TX_OK;
3553 }
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003554 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08003555 tx_ring->tx_stats.tx_linearize++;
3556 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003557
3558 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3559 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3560 * + 4 desc gap to avoid the cache line where head is,
3561 * + 1 desc for context descriptor,
3562 * otherwise try next time
3563 */
3564 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3565 tx_ring->tx_stats.tx_busy++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003566 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003567 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003568
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003569 /* record the location of the first descriptor for this packet */
3570 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3571 first->skb = skb;
3572 first->bytecount = skb->len;
3573 first->gso_segs = 1;
3574
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003575 /* prepare the xmit flags */
3576 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3577 goto out_drop;
3578
3579 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04003580 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003581
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003582 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00003583 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003584 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00003585 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003586 tx_flags |= I40E_TX_FLAGS_IPV6;
3587
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003588 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003589
3590 if (tso < 0)
3591 goto out_drop;
3592 else if (tso)
3593 tx_flags |= I40E_TX_FLAGS_TSO;
3594
Alexander Duyck3bc67972016-02-17 11:02:56 -08003595 /* Always offload the checksum, since it's in the data descriptor */
3596 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3597 tx_ring, &cd_tunneling);
3598 if (tso < 0)
3599 goto out_drop;
3600
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003601 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3602
3603 if (tsyn)
3604 tx_flags |= I40E_TX_FLAGS_TSYN;
3605
Jakub Kicinski259afec2014-03-15 14:55:37 +00003606 skb_tx_timestamp(skb);
3607
Alexander Duyckb1941302013-09-28 06:00:32 +00003608 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003609 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3610
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003611 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3612 cd_tunneling, cd_l2tag2);
3613
3614 /* Add Flow Director ATR if it's enabled.
3615 *
3616 * NOTE: this must always be directly before the data descriptor.
3617 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08003618 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003619
Jacob Keller69077572017-05-03 10:28:54 -07003620 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3621 td_cmd, td_offset))
3622 goto cleanup_tx_tstamp;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003623
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003624 return NETDEV_TX_OK;
3625
3626out_drop:
Scott Petersoned0980c2017-04-13 04:45:44 -04003627 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003628 dev_kfree_skb_any(first->skb);
3629 first->skb = NULL;
Jacob Keller69077572017-05-03 10:28:54 -07003630cleanup_tx_tstamp:
3631 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3632 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3633
3634 dev_kfree_skb_any(pf->ptp_tx_skb);
3635 pf->ptp_tx_skb = NULL;
3636 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3637 }
3638
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003639 return NETDEV_TX_OK;
3640}
3641
3642/**
3643 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3644 * @skb: send buffer
3645 * @netdev: network interface device structure
3646 *
3647 * Returns NETDEV_TX_OK if sent, else an error code
3648 **/
3649netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3650{
3651 struct i40e_netdev_priv *np = netdev_priv(netdev);
3652 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e152013-09-28 06:00:58 +00003653 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003654
3655	/* hardware can't handle really short frames; hardware padding works
3656	 * beyond this point
3657 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08003658 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3659 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003660
3661 return i40e_xmit_frame_ring(skb, tx_ring);
3662}