/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

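/**
 * build_ctob - build the Tx data descriptor cmd_type_offset_bsz quadword
 * @td_cmd: Tx descriptor command flags
 * @td_offset: packed MACLEN/IPLEN/L4LEN header offsets
 * @size: size of the data buffer in bytes
 * @td_tag: L2TAG1 (VLAN) tag to insert
 **/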
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

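/* max number of 1 ms waits for two Tx descriptors to free up while
 * programming a Flow Director filter
 */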
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

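/* IP_HEADER_OFFSET is the Ethernet header length; the dummy packets
 * below place the IP header immediately after it
 */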
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: flow director filter spec describing the rule to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			goto unsupported_flow;
		}
		break;
	default:
unsupported_flow:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

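/* when WB_ON_ITR is in use, force a descriptor write-back once fewer than
 * (WB_STRIDE + 1) descriptors are still pending completion
 */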
#define WB_STRIDE 0x3

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

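/* wake a stopped queue once at least this many descriptors are free again */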
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000748#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
749 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
750 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
751 /* Make sure that anybody stopping the queue after this
752 * sees the new next_to_clean.
753 */
754 smp_mb();
755 if (__netif_subqueue_stopped(tx_ring->netdev,
756 tx_ring->queue_index) &&
Alexander Duycka619afe2016-03-07 09:30:03 -0800757 !test_bit(__I40E_DOWN, &vsi->state)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000758 netif_wake_subqueue(tx_ring->netdev,
759 tx_ring->queue_index);
760 ++tx_ring->tx_stats.restart_queue;
761 }
762 }
763
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000764 return !!budget;
765}
766
767/**
Anjali Singhai Jainecc6a232016-01-13 16:51:43 -0800768 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
769 * @vsi: the VSI we care about
770 * @q_vector: the vector on which to enable writeback
771 *
772 **/
773static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
774 struct i40e_q_vector *q_vector)
775{
776 u16 flags = q_vector->tx.ring[0].flags;
777 u32 val;
778
779 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
780 return;
781
782 if (q_vector->arm_wb_state)
783 return;
784
785 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
786 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
787 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
788
789 wr32(&vsi->back->hw,
790 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
791 val);
792 } else {
793 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
794 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
795
796 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
797 }
798 q_vector->arm_wb_state = true;
799}
800
801/**
802 * i40e_force_wb - Issue SW Interrupt so HW does a wb
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000803 * @vsi: the VSI we care about
804 * @q_vector: the vector on which to force writeback
805 *
806 **/
Kiran Patilb03a8c12015-09-24 18:13:15 -0400807void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
Jesse Brandeburgd91649f2015-01-07 02:55:01 +0000808{
Anjali Singhai Jainecc6a232016-01-13 16:51:43 -0800809 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -0400810 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
811 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
812 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
813 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
814 /* allow 00 to be written to the index */
815
816 wr32(&vsi->back->hw,
817 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
818 vsi->base_vector - 1), val);
819 } else {
820 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
821 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
822 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
823 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
824 /* allow 00 to be written to the index */
825
826 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
827 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000828}
829
830/**
831 * i40e_set_new_dynamic_itr - Find new ITR level
832 * @rc: structure containing ring performance data
833 *
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -0400834 * Returns true if ITR changed, false if not
835 *
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000836 * Stores a new ITR value based on packets and byte counts during
837 * the last interrupt. The advantage of per interrupt computation
838 * is faster updates and more accurate ITR for the current traffic
839 * pattern. Constants in this function were computed based on
840 * theoretical maximum wire speed and thresholds were set based on
841 * testing data as well as attempting to minimize response time
842 * while increasing bulk throughput.
843 **/
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -0400844static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000845{
846 enum i40e_latency_range new_latency_range = rc->latency_range;
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400847 struct i40e_q_vector *qv = rc->ring->q_vector;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000848 u32 new_itr = rc->itr;
849 int bytes_per_int;
Jesse Brandeburg51cc6d92015-09-28 14:16:52 -0400850 int usecs;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000851
852 if (rc->total_packets == 0 || !rc->itr)
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -0400853 return false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000854
855 /* simple throttlerate management
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400856 * 0-10MB/s lowest (50000 ints/s)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000857 * 10-20MB/s low (20000 ints/s)
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400858 * 20-1249MB/s bulk (18000 ints/s)
859 * > 40000 Rx packets per second (8000 ints/s)
Jesse Brandeburg51cc6d92015-09-28 14:16:52 -0400860 *
861 * The math works out because the divisor is in 10^(-6) which
862 * turns the bytes/us input value into MB/s values, but
863 * make sure to use usecs, as the register values written
Jesse Brandeburgee2319c2015-09-28 14:16:54 -0400864 * are in 2 usec increments in the ITR registers, and make sure
865 * to use the smoothed values that the countdown timer gives us.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000866 */
Jesse Brandeburgee2319c2015-09-28 14:16:54 -0400867 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
Jesse Brandeburg51cc6d92015-09-28 14:16:52 -0400868 bytes_per_int = rc->total_bytes / usecs;
Jesse Brandeburgee2319c2015-09-28 14:16:54 -0400869
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400870 switch (new_latency_range) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000871 case I40E_LOWEST_LATENCY:
872 if (bytes_per_int > 10)
873 new_latency_range = I40E_LOW_LATENCY;
874 break;
875 case I40E_LOW_LATENCY:
876 if (bytes_per_int > 20)
877 new_latency_range = I40E_BULK_LATENCY;
878 else if (bytes_per_int <= 10)
879 new_latency_range = I40E_LOWEST_LATENCY;
880 break;
881 case I40E_BULK_LATENCY:
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400882 case I40E_ULTRA_LATENCY:
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400883 default:
884 if (bytes_per_int <= 20)
885 new_latency_range = I40E_LOW_LATENCY;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000886 break;
887 }
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400888
889 /* this is to adjust RX more aggressively when streaming small
890 * packets. The value of 40000 was picked as it is just beyond
891 * what the hardware can receive per second if in low latency
892 * mode.
893 */
894#define RX_ULTRA_PACKET_RATE 40000
895
896 if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
897 (&qv->rx == rc))
898 new_latency_range = I40E_ULTRA_LATENCY;
899
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -0400900 rc->latency_range = new_latency_range;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000901
902 switch (new_latency_range) {
903 case I40E_LOWEST_LATENCY:
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400904 new_itr = I40E_ITR_50K;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000905 break;
906 case I40E_LOW_LATENCY:
907 new_itr = I40E_ITR_20K;
908 break;
909 case I40E_BULK_LATENCY:
Jesse Brandeburgc56625d2015-09-28 14:16:53 -0400910 new_itr = I40E_ITR_18K;
911 break;
912 case I40E_ULTRA_LATENCY:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000913 new_itr = I40E_ITR_8K;
914 break;
915 default:
916 break;
917 }
918
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000919 rc->total_bytes = 0;
920 rc->total_packets = 0;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -0400921
922 if (new_itr != rc->itr) {
923 rc->itr = new_itr;
924 return true;
925 }
926
927 return false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000928}
929
930/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000931 * i40e_clean_programming_status - clean the programming status descriptor
932 * @rx_ring: the rx ring that has this descriptor
933 * @rx_desc: the rx descriptor written back by HW
934 *
935 * Flow director should handle FD_FILTER_STATUS to check its filter programming
936 * status being successful or not and take actions accordingly. FCoE should
937 * handle its context/filter programming/invalidation status and take actions.
938 *
939 **/
940static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
941 union i40e_rx_desc *rx_desc)
942{
943 u64 qw;
944 u8 id;
945
946 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
947 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
948 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
949
950 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +0000951 i40e_fd_handle_status(rx_ring, rx_desc, id);
Vasu Dev38e00432014-08-01 13:27:03 -0700952#ifdef I40E_FCOE
953 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
954 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
955 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
956#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000957}
958
959/**
960 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
961 * @tx_ring: the tx ring to set up
962 *
963 * Return 0 on success, negative on error
964 **/
965int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
966{
967 struct device *dev = tx_ring->dev;
968 int bi_size;
969
970 if (!dev)
971 return -ENOMEM;
972
Jesse Brandeburge908f812015-07-23 16:54:42 -0400973 /* warn if we are about to overwrite the pointer */
974 WARN_ON(tx_ring->tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000975 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
976 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
977 if (!tx_ring->tx_bi)
978 goto err;
979
980 /* round up to nearest 4K */
981 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +0000982 /* add u32 for head writeback, align after this takes care of
983 * guaranteeing this is at least one cache line in size
984 */
985 tx_ring->size += sizeof(u32);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000986 tx_ring->size = ALIGN(tx_ring->size, 4096);
987 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
988 &tx_ring->dma, GFP_KERNEL);
989 if (!tx_ring->desc) {
990 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
991 tx_ring->size);
992 goto err;
993 }
994
995 tx_ring->next_to_use = 0;
996 tx_ring->next_to_clean = 0;
997 return 0;
998
999err:
1000 kfree(tx_ring->tx_bi);
1001 tx_ring->tx_bi = NULL;
1002 return -ENOMEM;
1003}
1004
1005/**
1006 * i40e_clean_rx_ring - Free Rx buffers
1007 * @rx_ring: ring to be cleaned
1008 **/
1009void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1010{
1011 struct device *dev = rx_ring->dev;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001012 unsigned long bi_size;
1013 u16 i;
1014
1015 /* ring already cleared, nothing to do */
1016 if (!rx_ring->rx_bi)
1017 return;
1018
1019 /* Free all the Rx ring sk_buffs */
1020 for (i = 0; i < rx_ring->count; i++) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001021 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1022
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001023 if (rx_bi->skb) {
1024 dev_kfree_skb(rx_bi->skb);
1025 rx_bi->skb = NULL;
1026 }
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001027 if (!rx_bi->page)
1028 continue;
1029
1030 dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
1031 __free_pages(rx_bi->page, 0);
1032
1033 rx_bi->page = NULL;
1034 rx_bi->page_offset = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001035 }
1036
1037 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1038 memset(rx_ring->rx_bi, 0, bi_size);
1039
1040 /* Zero out the descriptor ring */
1041 memset(rx_ring->desc, 0, rx_ring->size);
1042
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001043 rx_ring->next_to_alloc = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001044 rx_ring->next_to_clean = 0;
1045 rx_ring->next_to_use = 0;
1046}
1047
1048/**
1049 * i40e_free_rx_resources - Free Rx resources
1050 * @rx_ring: ring to clean the resources from
1051 *
1052 * Free all receive software resources
1053 **/
1054void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1055{
1056 i40e_clean_rx_ring(rx_ring);
1057 kfree(rx_ring->rx_bi);
1058 rx_ring->rx_bi = NULL;
1059
1060 if (rx_ring->desc) {
1061 dma_free_coherent(rx_ring->dev, rx_ring->size,
1062 rx_ring->desc, rx_ring->dma);
1063 rx_ring->desc = NULL;
1064 }
1065}
1066
1067/**
1068 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1069 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1070 *
1071 * Returns 0 on success, negative on failure
1072 **/
1073int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1074{
1075 struct device *dev = rx_ring->dev;
1076 int bi_size;
1077
Jesse Brandeburge908f812015-07-23 16:54:42 -04001078 /* warn if we are about to overwrite the pointer */
1079 WARN_ON(rx_ring->rx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001080 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1081 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1082 if (!rx_ring->rx_bi)
1083 goto err;
1084
Carolyn Wybornyf217d6c2015-02-09 17:42:31 -08001085 u64_stats_init(&rx_ring->syncp);
Carolyn Wyborny638702b2015-01-24 09:58:32 +00001086
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001087 /* Round up to nearest 4K */
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001088 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001089 rx_ring->size = ALIGN(rx_ring->size, 4096);
1090 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1091 &rx_ring->dma, GFP_KERNEL);
1092
1093 if (!rx_ring->desc) {
1094 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1095 rx_ring->size);
1096 goto err;
1097 }
1098
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001099 rx_ring->next_to_alloc = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001100 rx_ring->next_to_clean = 0;
1101 rx_ring->next_to_use = 0;
1102
1103 return 0;
1104err:
1105 kfree(rx_ring->rx_bi);
1106 rx_ring->rx_bi = NULL;
1107 return -ENOMEM;
1108}
1109
1110/**
1111 * i40e_release_rx_desc - Store the new tail and head values
1112 * @rx_ring: ring to bump
1113 * @val: new head index
1114 **/
1115static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1116{
1117 rx_ring->next_to_use = val;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001118
1119 /* update next to alloc since we have filled the ring */
1120 rx_ring->next_to_alloc = val;
1121
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001122 /* Force memory writes to complete before letting h/w
1123 * know there are new descriptors to fetch. (Only
1124 * applicable for weak-ordered memory model archs,
1125 * such as IA-64).
1126 */
1127 wmb();
1128 writel(val, rx_ring->tail);
1129}
1130
1131/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001132 * i40e_alloc_mapped_page - recycle or make a new page
1133 * @rx_ring: ring to use
1134 * @bi: rx_buffer struct to modify
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001135 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001136 * Returns true if the page was successfully allocated or
1137 * reused.
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001138 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001139static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1140 struct i40e_rx_buffer *bi)
Mitch Williamsa132af22015-01-24 09:58:35 +00001141{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001142 struct page *page = bi->page;
1143 dma_addr_t dma;
Mitch Williamsa132af22015-01-24 09:58:35 +00001144
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001145 /* since we are recycling buffers we should seldom need to alloc */
1146 if (likely(page)) {
1147 rx_ring->rx_stats.page_reuse_count++;
1148 return true;
Mitch Williamsa132af22015-01-24 09:58:35 +00001149 }
1150
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001151 /* alloc new page for storage */
1152 page = dev_alloc_page();
1153 if (unlikely(!page)) {
1154 rx_ring->rx_stats.alloc_page_failed++;
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001155 return false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001156 }
1157
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001158 /* map page for use */
1159 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001160
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001161 /* if mapping failed free memory back to system since
1162 * there isn't much point in holding memory we can't use
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001163 */
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001164 if (dma_mapping_error(rx_ring->dev, dma)) {
1165 __free_pages(page, 0);
1166 rx_ring->rx_stats.alloc_page_failed++;
1167 return false;
1168 }
1169
1170 bi->dma = dma;
1171 bi->page = page;
1172 bi->page_offset = 0;
1173
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001174 return true;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001175}
1176
1177/**
1178 * i40e_receive_skb - Send a completed packet up the stack
1179 * @rx_ring: rx ring in play
1180 * @skb: packet to send up
1181 * @vlan_tag: vlan tag for packet
1182 **/
1183static void i40e_receive_skb(struct i40e_ring *rx_ring,
1184 struct sk_buff *skb, u16 vlan_tag)
1185{
1186 struct i40e_q_vector *q_vector = rx_ring->q_vector;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001187
Jesse Brandeburga149f2c2016-04-12 08:30:49 -07001188 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1189 (vlan_tag & VLAN_VID_MASK))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001190 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1191
Alexander Duyck8b650352015-09-24 09:04:32 -07001192 napi_gro_receive(&q_vector->napi, skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001193}
1194
1195/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001196 * i40e_alloc_rx_buffers - Replace used receive buffers
1197 * @rx_ring: ring to place buffers on
1198 * @cleaned_count: number of buffers to replace
1199 *
1200 * Returns false if all allocations were successful, true if any fail
1201 **/
1202bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1203{
1204 u16 ntu = rx_ring->next_to_use;
1205 union i40e_rx_desc *rx_desc;
1206 struct i40e_rx_buffer *bi;
1207
1208 /* do nothing if no valid netdev defined */
1209 if (!rx_ring->netdev || !cleaned_count)
1210 return false;
1211
1212 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1213 bi = &rx_ring->rx_bi[ntu];
1214
1215 do {
1216 if (!i40e_alloc_mapped_page(rx_ring, bi))
1217 goto no_buffers;
1218
1219 /* Refresh the desc even if buffer_addrs didn't change
1220 * because each write-back erases this info.
1221 */
1222 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001223
1224 rx_desc++;
1225 bi++;
1226 ntu++;
1227 if (unlikely(ntu == rx_ring->count)) {
1228 rx_desc = I40E_RX_DESC(rx_ring, 0);
1229 bi = rx_ring->rx_bi;
1230 ntu = 0;
1231 }
1232
1233 /* clear the status bits for the next_to_use descriptor */
1234 rx_desc->wb.qword1.status_error_len = 0;
1235
1236 cleaned_count--;
1237 } while (cleaned_count);
1238
1239 if (rx_ring->next_to_use != ntu)
1240 i40e_release_rx_desc(rx_ring, ntu);
1241
1242 return false;
1243
1244no_buffers:
1245 if (rx_ring->next_to_use != ntu)
1246 i40e_release_rx_desc(rx_ring, ntu);
1247
1248 /* make sure to come back via polling to try again after
1249 * allocation failure
1250 */
1251 return true;
1252}
1253
1254/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001255 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1256 * @vsi: the VSI we care about
1257 * @skb: skb currently being received and modified
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001258 * @rx_desc: the receive descriptor
1259 *
1260 * skb->protocol must be set before this function is called
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001261 **/
1262static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1263 struct sk_buff *skb,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001264 union i40e_rx_desc *rx_desc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001265{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001266 struct i40e_rx_ptype_decoded decoded;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001267 u32 rx_error, rx_status;
Alexander Duyck858296c82016-06-14 15:45:42 -07001268 bool ipv4, ipv6;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001269 u8 ptype;
1270 u64 qword;
1271
1272 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1273 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1274 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1275 I40E_RXD_QW1_ERROR_SHIFT;
1276 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1277 I40E_RXD_QW1_STATUS_SHIFT;
1278 decoded = decode_rx_desc_ptype(ptype);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001279
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001280 skb->ip_summed = CHECKSUM_NONE;
1281
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001282 skb_checksum_none_assert(skb);
1283
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001284 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001285 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001286 return;
1287
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001288 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001289 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001290 return;
1291
1292 /* both known and outer_ip must be set for the below code to work */
1293 if (!(decoded.known && decoded.outer_ip))
1294 return;
1295
Alexander Duyckfad57332016-01-24 21:17:22 -08001296 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1297 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1298 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1299 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001300
1301 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001302 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1303 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001304 goto checksum_fail;
1305
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001306 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001307 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001308 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001309 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001310 return;
1311
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001312 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001313 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001314 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001315
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001316 /* handle packets that were not able to be checksummed due
 1317	 * to arrival speed; in this case the stack can compute
1318 * the csum.
1319 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001320 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001321 return;
1322
Alexander Duyck858296c82016-06-14 15:45:42 -07001323 /* If there is an outer header present that might contain a checksum
1324 * we need to bump the checksum level by 1 to reflect the fact that
1325 * we are indicating we validated the inner checksum.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001326 */
Alexander Duyck858296c82016-06-14 15:45:42 -07001327 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1328 skb->csum_level = 1;
Alexander Duyckfad57332016-01-24 21:17:22 -08001329
Alexander Duyck858296c82016-06-14 15:45:42 -07001330 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1331 switch (decoded.inner_prot) {
1332 case I40E_RX_PTYPE_INNER_PROT_TCP:
1333 case I40E_RX_PTYPE_INNER_PROT_UDP:
1334 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1335 skb->ip_summed = CHECKSUM_UNNECESSARY;
 1336		/* fall through */
1337 default:
1338 break;
1339 }
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001340
1341 return;
1342
1343checksum_fail:
1344 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001345}
1346
1347/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001348 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001349 * @ptype: the ptype value from the descriptor
1350 *
1351 * Returns a hash type to be used by skb_set_hash
1352 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001353static inline int i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001354{
1355 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1356
1357 if (!decoded.known)
1358 return PKT_HASH_TYPE_NONE;
1359
1360 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1361 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1362 return PKT_HASH_TYPE_L4;
1363 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1364 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1365 return PKT_HASH_TYPE_L3;
1366 else
1367 return PKT_HASH_TYPE_L2;
1368}
1369
1370/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001371 * i40e_rx_hash - set the hash value in the skb
1372 * @ring: descriptor ring
1373 * @rx_desc: specific descriptor
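 * @skb: skb to set the hash on
 * @rx_ptype: the packet type decoded by hardware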
1374 **/
1375static inline void i40e_rx_hash(struct i40e_ring *ring,
1376 union i40e_rx_desc *rx_desc,
1377 struct sk_buff *skb,
1378 u8 rx_ptype)
1379{
1380 u32 hash;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001381 const __le64 rss_mask =
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001382 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1383 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1384
Mitch Williamsa876c3b2016-05-03 15:13:18 -07001385 if (!(ring->netdev->features & NETIF_F_RXHASH))
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001386 return;
1387
1388 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1389 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1390 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1391 }
1392}
1393
1394/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001395 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1396 * @rx_ring: rx descriptor ring packet is being transacted on
1397 * @rx_desc: pointer to the EOP Rx descriptor
1398 * @skb: pointer to current skb being populated
1399 * @rx_ptype: the packet type decoded by hardware
Mitch Williamsa132af22015-01-24 09:58:35 +00001400 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001401 * This function checks the ring, descriptor, and packet information in
1402 * order to populate the hash, checksum, VLAN, protocol, and
1403 * other fields within the skb.
Mitch Williamsa132af22015-01-24 09:58:35 +00001404 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001405static inline
1406void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1407 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1408 u8 rx_ptype)
1409{
1410 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1411 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1412 I40E_RXD_QW1_STATUS_SHIFT;
Jacob Keller144ed172016-10-05 09:30:42 -07001413 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1414 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001415 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1416
Jacob Keller12490502016-10-05 09:30:44 -07001417 if (unlikely(tsynvalid))
Jacob Keller144ed172016-10-05 09:30:42 -07001418 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001419
1420 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1421
1422 /* modifies the skb - consumes the enet header */
1423 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1424
1425 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1426
1427 skb_record_rx_queue(skb, rx_ring->queue_index);
1428}
1429
1430/**
1431 * i40e_pull_tail - i40e specific version of skb_pull_tail
1432 * @rx_ring: rx descriptor ring packet is being transacted on
1433 * @skb: pointer to current skb being adjusted
1434 *
1435 * This function is an i40e specific version of __pskb_pull_tail. The
1436 * main difference between this version and the original function is that
1437 * this function can make several assumptions about the state of things
1438 * that allow for significant optimizations versus the standard function.
1439 * As a result we can do things like drop a frag and maintain an accurate
1440 * truesize for the skb.
1441 */
1442static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
1443{
1444 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1445 unsigned char *va;
1446 unsigned int pull_len;
1447
1448 /* it is valid to use page_address instead of kmap since we are
1449 * working with pages allocated out of the lomem pool per
1450 * alloc_page(GFP_ATOMIC)
1451 */
1452 va = skb_frag_address(frag);
1453
1454 /* we need the header to contain the greater of either ETH_HLEN or
1455 * 60 bytes if the skb->len is less than 60 for skb_pad.
1456 */
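	/* eth_get_headlen() uses the flow dissector to work out how many
	 * bytes of protocol headers to pull, capped at I40E_RX_HDR_SIZE
	 */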
1457 pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
1458
1459 /* align pull length to size of long to optimize memcpy performance */
1460 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1461
1462 /* update all of the pointers */
1463 skb_frag_size_sub(frag, pull_len);
1464 frag->page_offset += pull_len;
1465 skb->data_len -= pull_len;
1466 skb->tail += pull_len;
1467}
1468
1469/**
1470 * i40e_cleanup_headers - Correct empty headers
1471 * @rx_ring: rx descriptor ring packet is being transacted on
1472 * @skb: pointer to current skb being fixed
1473 *
1474 * Also address the case where we are pulling data in on pages only
1475 * and as such no data is present in the skb header.
1476 *
1477 * In addition if skb is not at least 60 bytes we need to pad it so that
1478 * it is large enough to qualify as a valid Ethernet frame.
1479 *
1480 * Returns true if an error was encountered and skb was freed.
1481 **/
1482static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
1483{
1484 /* place header in linear portion of buffer */
1485 if (skb_is_nonlinear(skb))
1486 i40e_pull_tail(rx_ring, skb);
1487
1488 /* if eth_skb_pad returns an error the skb was freed */
1489 if (eth_skb_pad(skb))
1490 return true;
1491
1492 return false;
1493}
1494
1495/**
1496 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1497 * @rx_ring: rx descriptor ring to store buffers on
1498 * @old_buff: donor buffer to have page reused
1499 *
1500 * Synchronizes page for reuse by the adapter
1501 **/
1502static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1503 struct i40e_rx_buffer *old_buff)
1504{
1505 struct i40e_rx_buffer *new_buff;
1506 u16 nta = rx_ring->next_to_alloc;
1507
1508 new_buff = &rx_ring->rx_bi[nta];
1509
1510 /* update, and store next to alloc */
1511 nta++;
1512 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1513
1514 /* transfer page from old buffer to new buffer */
1515 *new_buff = *old_buff;
1516}
1517
1518/**
1519 * i40e_page_is_reserved - check if reuse is possible
1520 * @page: page struct to check
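 *
 * A page allocated on a remote NUMA node or taken from the pfmemalloc
 * emergency reserves is treated as reserved and is not recycled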
1521 */
1522static inline bool i40e_page_is_reserved(struct page *page)
1523{
1524 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1525}
1526
1527/**
1528 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1529 * @rx_ring: rx descriptor ring to transact packets on
1530 * @rx_buffer: buffer containing page to add
1531 * @rx_desc: descriptor containing length of buffer written by hardware
1532 * @skb: sk_buff to place the data into
1533 *
1534 * This function will add the data contained in rx_buffer->page to the skb.
1535 * This is done either through a direct copy if the data in the buffer is
1536 * less than the skb header size, otherwise it will just attach the page as
1537 * a frag to the skb.
1538 *
1539 * The function will then update the page offset if necessary and return
1540 * true if the buffer can be reused by the adapter.
1541 **/
1542static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
1543 struct i40e_rx_buffer *rx_buffer,
1544 union i40e_rx_desc *rx_desc,
1545 struct sk_buff *skb)
1546{
1547 struct page *page = rx_buffer->page;
1548 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1549 unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1550 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1551#if (PAGE_SIZE < 8192)
1552 unsigned int truesize = I40E_RXBUFFER_2048;
1553#else
1554 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1555 unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
1556#endif
1557
1558 /* will the data fit in the skb we allocated? if so, just
1559 * copy it as it is pretty small anyway
1560 */
1561 if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1562 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1563
1564 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1565
1566 /* page is not reserved, we can reuse buffer as-is */
1567 if (likely(!i40e_page_is_reserved(page)))
1568 return true;
1569
1570 /* this page cannot be reused so discard it */
1571 __free_pages(page, 0);
1572 return false;
1573 }
1574
1575 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1576 rx_buffer->page_offset, size, truesize);
1577
1578 /* avoid re-using remote pages */
1579 if (unlikely(i40e_page_is_reserved(page)))
1580 return false;
1581
1582#if (PAGE_SIZE < 8192)
1583 /* if we are only owner of page we can reuse it */
1584 if (unlikely(page_count(page) != 1))
1585 return false;
1586
1587 /* flip page offset to other buffer */
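	/* (with 4K pages and 2K buffers this simply alternates the offset
	 * between 0 and 2048, so the two halves of the page are used in turn)
	 */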
1588 rx_buffer->page_offset ^= truesize;
1589#else
1590 /* move offset up to the next cache line */
1591 rx_buffer->page_offset += truesize;
1592
1593 if (rx_buffer->page_offset > last_offset)
1594 return false;
1595#endif
1596
 1597	/* Even if we own the page, we are not allowed to use atomic_set().
1598 * This would break get_page_unless_zero() users.
1599 */
1600 get_page(rx_buffer->page);
1601
1602 return true;
1603}
1604
1605/**
1606 * i40e_fetch_rx_buffer - Allocate skb and populate it
1607 * @rx_ring: rx descriptor ring to transact packets on
1608 * @rx_desc: descriptor containing info written by hardware
1609 *
1610 * This function allocates an skb on the fly, and populates it with the page
1611 * data from the current receive descriptor, taking care to set up the skb
1612 * correctly, as well as handling calling the page recycle function if
1613 * necessary.
1614 */
1615static inline
1616struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
1617 union i40e_rx_desc *rx_desc)
1618{
1619 struct i40e_rx_buffer *rx_buffer;
1620 struct sk_buff *skb;
1621 struct page *page;
1622
1623 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1624 page = rx_buffer->page;
1625 prefetchw(page);
1626
1627 skb = rx_buffer->skb;
1628
1629 if (likely(!skb)) {
1630 void *page_addr = page_address(page) + rx_buffer->page_offset;
1631
1632 /* prefetch first cache line of first page */
1633 prefetch(page_addr);
1634#if L1_CACHE_BYTES < 128
1635 prefetch(page_addr + L1_CACHE_BYTES);
1636#endif
1637
1638 /* allocate a skb to store the frags */
1639 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1640 I40E_RX_HDR_SIZE,
1641 GFP_ATOMIC | __GFP_NOWARN);
1642 if (unlikely(!skb)) {
1643 rx_ring->rx_stats.alloc_buff_failed++;
1644 return NULL;
1645 }
1646
1647 /* we will be copying header into skb->data in
1648 * pskb_may_pull so it is in our interest to prefetch
1649 * it now to avoid a possible cache miss
1650 */
1651 prefetchw(skb->data);
1652 } else {
1653 rx_buffer->skb = NULL;
1654 }
1655
1656 /* we are reusing so sync this buffer for CPU use */
1657 dma_sync_single_range_for_cpu(rx_ring->dev,
1658 rx_buffer->dma,
1659 rx_buffer->page_offset,
1660 I40E_RXBUFFER_2048,
1661 DMA_FROM_DEVICE);
1662
1663 /* pull page into skb */
1664 if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1665 /* hand second half of page back to the ring */
1666 i40e_reuse_rx_page(rx_ring, rx_buffer);
1667 rx_ring->rx_stats.page_reuse_count++;
1668 } else {
1669 /* we are not reusing the buffer so unmap it */
1670 dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1671 DMA_FROM_DEVICE);
1672 }
1673
1674 /* clear contents of buffer_info */
1675 rx_buffer->page = NULL;
1676
1677 return skb;
1678}
1679
1680/**
1681 * i40e_is_non_eop - process handling of non-EOP buffers
1682 * @rx_ring: Rx ring being processed
1683 * @rx_desc: Rx descriptor for current buffer
1684 * @skb: Current socket buffer containing buffer in progress
1685 *
1686 * This function updates next to clean. If the buffer is an EOP buffer
1687 * this function exits returning false, otherwise it will place the
1688 * sk_buff in the next buffer to be chained and return true indicating
1689 * that this is in fact a non-EOP buffer.
1690 **/
1691static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1692 union i40e_rx_desc *rx_desc,
1693 struct sk_buff *skb)
1694{
1695 u32 ntc = rx_ring->next_to_clean + 1;
1696
1697 /* fetch, update, and store next to clean */
1698 ntc = (ntc < rx_ring->count) ? ntc : 0;
1699 rx_ring->next_to_clean = ntc;
1700
1701 prefetch(I40E_RX_DESC(rx_ring, ntc));
1702
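	/* programming status descriptors report the result of filter
	 * programming (e.g. Flow Director) rather than a received packet,
	 * so consume them here without building an skb
	 */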
1703#define staterrlen rx_desc->wb.qword1.status_error_len
1704 if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
1705 i40e_clean_programming_status(rx_ring, rx_desc);
1706 rx_ring->rx_bi[ntc].skb = skb;
1707 return true;
1708 }
1709 /* if we are the last buffer then there is nothing else to do */
1710#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1711 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1712 return false;
1713
1714 /* place skb in next buffer to be received */
1715 rx_ring->rx_bi[ntc].skb = skb;
1716 rx_ring->rx_stats.non_eop_descs++;
1717
1718 return true;
1719}
1720
1721/**
1722 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1723 * @rx_ring: rx descriptor ring to transact packets on
1724 * @budget: Total limit on number of packets to process
1725 *
1726 * This function provides a "bounce buffer" approach to Rx interrupt
1727 * processing. The advantage to this is that on systems that have
1728 * expensive overhead for IOMMU access this provides a means of avoiding
1729 * it by maintaining the mapping of the page to the system.
1730 *
1731 * Returns amount of work completed
1732 **/
1733static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
Mitch Williamsa132af22015-01-24 09:58:35 +00001734{
1735 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1736 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001737 bool failure = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001738
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001739 while (likely(total_rx_packets < budget)) {
1740 union i40e_rx_desc *rx_desc;
Mitch Williamsa132af22015-01-24 09:58:35 +00001741 struct sk_buff *skb;
1742 u16 vlan_tag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001743 u8 rx_ptype;
1744 u64 qword;
1745
Mitch Williamsa132af22015-01-24 09:58:35 +00001746 /* return some buffers to hardware, one at a time is too slow */
1747 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001748 failure = failure ||
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001749 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001750 cleaned_count = 0;
1751 }
1752
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001753 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1754
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001755 /* status_error_len will always be zero for unused descriptors
1756 * because it's cleared in cleanup, and overlaps with hdr_addr
1757 * which is always zero because packet split isn't used, if the
1758 * hardware wrote DD then it will be non-zero
1759 */
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001760 if (!i40e_test_staterr(rx_desc,
1761 BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001762 break;
1763
Mitch Williamsa132af22015-01-24 09:58:35 +00001764 /* This memory barrier is needed to keep us from reading
1765 * any other fields out of the rx_desc until we know the
1766 * DD bit is set.
1767 */
Alexander Duyck67317162015-04-08 18:49:43 -07001768 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001769
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001770 skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
1771 if (!skb)
1772 break;
Mitch Williamsa132af22015-01-24 09:58:35 +00001773
Mitch Williamsa132af22015-01-24 09:58:35 +00001774 cleaned_count++;
1775
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001776 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
Mitch Williamsa132af22015-01-24 09:58:35 +00001777 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001778
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001779 /* ERR_MASK will only have valid bits if EOP set, and
1780 * what we are doing here is actually checking
1781 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1782 * the error field
1783 */
1784 if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001785 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001786 continue;
1787 }
1788
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001789 if (i40e_cleanup_headers(rx_ring, skb))
1790 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001791
1792 /* probably a little skewed due to removing CRC */
1793 total_rx_bytes += skb->len;
Mitch Williamsa132af22015-01-24 09:58:35 +00001794
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001795 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1796 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1797 I40E_RXD_QW1_PTYPE_SHIFT;
1798
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001799 /* populate checksum, VLAN, and protocol */
1800 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00001801
Mitch Williamsa132af22015-01-24 09:58:35 +00001802#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001803 if (unlikely(
1804 i40e_rx_is_fcoe(rx_ptype) &&
1805 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001806 dev_kfree_skb_any(skb);
1807 continue;
1808 }
1809#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001810
1811 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1812 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1813
Mitch Williamsa132af22015-01-24 09:58:35 +00001814 i40e_receive_skb(rx_ring, skb, vlan_tag);
1815
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001816 /* update budget accounting */
1817 total_rx_packets++;
1818 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001819
1820 u64_stats_update_begin(&rx_ring->syncp);
1821 rx_ring->stats.packets += total_rx_packets;
1822 rx_ring->stats.bytes += total_rx_bytes;
1823 u64_stats_update_end(&rx_ring->syncp);
1824 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1825 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1826
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001827 /* guarantee a trip back through this routine if there was a failure */
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001828 return failure ? budget : total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001829}
1830
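/**
 * i40e_buildreg_itr - build a value for the dynamic interrupt control register
 * @type: ITR index to update (I40E_RX_ITR, I40E_TX_ITR or I40E_ITR_NONE)
 * @itr: interval to program
 *
 * The returned value sets INTENA to re-enable the interrupt and selects
 * which ITR register is updated along with its new interval.
 **/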
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001831static u32 i40e_buildreg_itr(const int type, const u16 itr)
1832{
1833 u32 val;
1834
1835 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08001836 /* Don't clear PBA because that can cause lost interrupts that
1837 * came in while we were cleaning/polling
1838 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001839 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1840 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1841
1842 return val;
1843}
1844
1845/* a small macro to shorten up some long lines */
1846#define INTREG I40E_PFINT_DYN_CTLN
Jacob Keller65e87c02016-09-12 14:18:44 -07001847static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
1848{
1849 return !!(vsi->rx_rings[idx]->rx_itr_setting);
1850}
1851
1852static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
1853{
1854 return !!(vsi->tx_rings[idx]->tx_itr_setting);
1855}
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001856
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001857/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001858 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1859 * @vsi: the VSI we care about
1860 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1861 *
1862 **/
1863static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1864 struct i40e_q_vector *q_vector)
1865{
1866 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001867 bool rx = false, tx = false;
1868 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001869 int vector;
Kan Lianga75e8002016-02-19 09:24:04 -05001870 int idx = q_vector->v_idx;
Jacob Keller65e87c02016-09-12 14:18:44 -07001871 int rx_itr_setting, tx_itr_setting;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001872
1873 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001874
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001875 /* avoid dynamic calculation if in countdown mode OR if
1876 * all dynamic is disabled
1877 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001878 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1879
Jacob Keller65e87c02016-09-12 14:18:44 -07001880 rx_itr_setting = get_rx_itr_enabled(vsi, idx);
1881 tx_itr_setting = get_tx_itr_enabled(vsi, idx);
1882
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001883 if (q_vector->itr_countdown > 0 ||
Jacob Keller65e87c02016-09-12 14:18:44 -07001884 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
1885 !ITR_IS_DYNAMIC(tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001886 goto enable_int;
1887 }
1888
Jacob Keller65e87c02016-09-12 14:18:44 -07001889	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001890 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1891 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001892 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001893
Jacob Keller65e87c02016-09-12 14:18:44 -07001894 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001895 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1896 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001897 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001898
1899 if (rx || tx) {
1900 /* get the higher of the two ITR adjustments and
1901 * use the same value for both ITR registers
1902 * when in adaptive mode (Rx and/or Tx)
1903 */
1904 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1905
1906 q_vector->tx.itr = q_vector->rx.itr = itr;
1907 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1908 tx = true;
1909 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1910 rx = true;
1911 }
1912
1913 /* only need to enable the interrupt once, but need
1914 * to possibly update both ITR values
1915 */
1916 if (rx) {
1917 /* set the INTENA_MSK_MASK so that this first write
1918 * won't actually enable the interrupt, instead just
1919 * updating the ITR (it's bit 31 PF and VF)
1920 */
1921 rxval |= BIT(31);
1922 /* don't check _DOWN because interrupt isn't being enabled */
1923 wr32(hw, INTREG(vector - 1), rxval);
1924 }
1925
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001926enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001927 if (!test_bit(__I40E_DOWN, &vsi->state))
1928 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001929
1930 if (q_vector->itr_countdown)
1931 q_vector->itr_countdown--;
1932 else
1933 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001934}
1935
1936/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001937 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1938 * @napi: napi struct with our devices info in it
1939 * @budget: amount of work driver is allowed to do this pass, in packets
1940 *
1941 * This function will clean all queues associated with a q_vector.
1942 *
1943 * Returns the amount of work done
1944 **/
1945int i40e_napi_poll(struct napi_struct *napi, int budget)
1946{
1947 struct i40e_q_vector *q_vector =
1948 container_of(napi, struct i40e_q_vector, napi);
1949 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001950 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001951 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001952 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001953 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001954 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001955
1956 if (test_bit(__I40E_DOWN, &vsi->state)) {
1957 napi_complete(napi);
1958 return 0;
1959 }
1960
Kiran Patil9c6c1252015-11-06 15:26:02 -08001961 /* Clear hung_detected bit */
1962 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001963 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001964 * budget and be more aggressive about cleaning up the Tx descriptors.
1965 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001966 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08001967 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001968 clean_complete = false;
1969 continue;
1970 }
1971 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001972 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001973 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001974
Alexander Duyckc67cace2015-09-24 09:04:26 -07001975 /* Handle case where we are called by netpoll with a budget of 0 */
1976 if (budget <= 0)
1977 goto tx_only;
1978
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001979 /* We attempt to distribute budget to each Rx queue fairly, but don't
1980 * allow the budget to go below 1 because that would exit polling early.
1981 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001982 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
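	/* e.g. a NAPI budget of 64 shared by two ring pairs gives each Rx
	 * ring a budget of 32 packets for this poll
	 */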
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001983
Mitch Williamsa132af22015-01-24 09:58:35 +00001984 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001985 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001986
1987 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001988 /* if we clean as many as budgeted, we must not be done */
1989 if (cleaned >= budget_per_ring)
1990 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001991 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001992
1993 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001994 if (!clean_complete) {
Alan Brady96db7762016-09-14 16:24:38 -07001995 const cpumask_t *aff_mask = &q_vector->affinity_mask;
1996 int cpu_id = smp_processor_id();
1997
1998 /* It is possible that the interrupt affinity has changed but,
1999 * if the cpu is pegged at 100%, polling will never exit while
2000 * traffic continues and the interrupt will be stuck on this
2001 * cpu. We check to make sure affinity is correct before we
2002 * continue to poll, otherwise we must stop polling so the
2003 * interrupt can move to the correct cpu.
2004 */
2005 if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
2006 !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07002007tx_only:
Alan Brady96db7762016-09-14 16:24:38 -07002008 if (arm_wb) {
2009 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2010 i40e_enable_wb_on_itr(vsi, q_vector);
2011 }
2012 return budget;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002013 }
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002014 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002015
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002016 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2017 q_vector->arm_wb_state = false;
2018
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002019 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002020 napi_complete_done(napi, work_done);
Alan Brady96db7762016-09-14 16:24:38 -07002021
2022 /* If we're prematurely stopping polling to fix the interrupt
2023 * affinity we want to make sure polling starts back up so we
2024 * issue a call to i40e_force_wb which triggers a SW interrupt.
2025 */
2026 if (!clean_complete)
2027 i40e_force_wb(vsi, q_vector);
2028 else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002029 i40e_irq_dynamic_enable_icr0(vsi->back, false);
Alan Brady96db7762016-09-14 16:24:38 -07002030 else
2031 i40e_update_enable_itr(vsi, q_vector);
2032
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002033 return 0;
2034}
2035
2036/**
2037 * i40e_atr - Add a Flow Director ATR filter
2038 * @tx_ring: ring to add programming descriptor to
2039 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002040 * @tx_flags: send tx flags
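 *
 * ATR (Application Targeted Routing) samples outgoing TCP flows and writes
 * Flow Director filter programming descriptors so that receive traffic for
 * the same flow is steered back to the queue pair that transmitted it.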
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002041 **/
2042static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002043 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002044{
2045 struct i40e_filter_program_desc *fdir_desc;
2046 struct i40e_pf *pf = tx_ring->vsi->back;
2047 union {
2048 unsigned char *network;
2049 struct iphdr *ipv4;
2050 struct ipv6hdr *ipv6;
2051 } hdr;
2052 struct tcphdr *th;
2053 unsigned int hlen;
2054 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002055 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002056 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002057
2058 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002059 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002060 return;
2061
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002062 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2063 return;
2064
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002065 /* if sampling is disabled do nothing */
2066 if (!tx_ring->atr_sample_rate)
2067 return;
2068
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002069 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002070 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002071 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002072
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002073 /* snag network header to get L4 type and address */
2074 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2075 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002076
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002077 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002078 * tx_enable_csum function if encap is enabled.
2079 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002080 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2081 /* access ihl as u8 to avoid unaligned access on ia64 */
2082 hlen = (hdr.network[0] & 0x0F) << 2;
2083 l4_proto = hdr.ipv4->protocol;
2084 } else {
2085 hlen = hdr.network - skb->data;
2086 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2087 hlen -= hdr.network - skb->data;
2088 }
2089
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002090 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002091 return;
2092
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002093 th = (struct tcphdr *)(hdr.network + hlen);
2094
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002095 /* Due to lack of space, no more new filters can be programmed */
2096 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2097 return;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002098 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2099 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002100 /* HW ATR eviction will take care of removing filters on FIN
2101 * and RST packets.
2102 */
2103 if (th->fin || th->rst)
2104 return;
2105 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002106
2107 tx_ring->atr_count++;
2108
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002109 /* sample on all syn/fin/rst packets or once every atr sample rate */
2110 if (!th->fin &&
2111 !th->syn &&
2112 !th->rst &&
2113 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002114 return;
2115
2116 tx_ring->atr_count = 0;
2117
2118 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002119 i = tx_ring->next_to_use;
2120 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2121
2122 i++;
2123 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002124
2125 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2126 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002127 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002128 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2129 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2130 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2131 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2132
2133 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2134
2135 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2136
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002137 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002138 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2139 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2140 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2141 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2142
2143 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2144 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2145
2146 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2147 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2148
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002149 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002150 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002151 dtype_cmd |=
2152 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2153 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2154 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2155 else
2156 dtype_cmd |=
2157 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2158 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2159 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002160
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002161 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2162 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002163 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2164
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002165 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002166 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002167 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002168 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002169}
2170
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002171/**
2172 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2173 * @skb: send buffer
2174 * @tx_ring: ring to send buffer on
2175 * @flags: the tx flags to be set
2176 *
 2177 * Checks the skb and sets up the corresponding generic transmit flags
2178 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2179 *
 2180 * Returns an error code to indicate that the frame should be dropped upon
 2181 * error, otherwise returns 0 to indicate the flags have been set properly.
2182 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002183#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002184inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002185 struct i40e_ring *tx_ring,
2186 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002187#else
2188static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2189 struct i40e_ring *tx_ring,
2190 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002191#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002192{
2193 __be16 protocol = skb->protocol;
2194 u32 tx_flags = 0;
2195
Greg Rose31eaacc2015-03-31 00:45:03 -07002196 if (protocol == htons(ETH_P_8021Q) &&
2197 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2198 /* When HW VLAN acceleration is turned off by the user the
2199 * stack sets the protocol to 8021q so that the driver
2200 * can take any steps required to support the SW only
2201 * VLAN handling. In our case the driver doesn't need
2202 * to take any further steps so just set the protocol
2203 * to the encapsulated ethertype.
2204 */
2205 skb->protocol = vlan_get_protocol(skb);
2206 goto out;
2207 }
2208
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002209 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002210 if (skb_vlan_tag_present(skb)) {
2211 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002212 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2213 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002214 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002215 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002216
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002217 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2218 if (!vhdr)
2219 return -EINVAL;
2220
2221 protocol = vhdr->h_vlan_encapsulated_proto;
2222 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2223 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2224 }
2225
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002226 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2227 goto out;
2228
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002229 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002230 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2231 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002232 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2233 tx_flags |= (skb->priority & 0x7) <<
2234 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2235 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2236 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002237 int rc;
2238
2239 rc = skb_cow_head(skb, 0);
2240 if (rc < 0)
2241 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002242 vhdr = (struct vlan_ethhdr *)skb->data;
2243 vhdr->h_vlan_TCI = htons(tx_flags >>
2244 I40E_TX_FLAGS_VLAN_SHIFT);
2245 } else {
2246 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2247 }
2248 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002249
2250out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002251 *flags = tx_flags;
2252 return 0;
2253}
2254
2255/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002256 * i40e_tso - set up the tso context descriptor
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002257 * @skb: ptr to the skb we're sending
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002258 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002259 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002260 *
 2261 * Returns 0 if no TSO can happen, 1 if TSO is in use, or a negative error code
2262 **/
Jesse Brandeburg84b079922016-04-01 03:56:05 -07002263static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002264{
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002265 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002266 union {
2267 struct iphdr *v4;
2268 struct ipv6hdr *v6;
2269 unsigned char *hdr;
2270 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002271 union {
2272 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002273 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002274 unsigned char *hdr;
2275 } l4;
2276 u32 paylen, l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002277 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002278
Shannon Nelsone9f65632016-01-04 10:33:04 -08002279 if (skb->ip_summed != CHECKSUM_PARTIAL)
2280 return 0;
2281
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002282 if (!skb_is_gso(skb))
2283 return 0;
2284
Francois Romieudd225bc2014-03-30 03:14:48 +00002285 err = skb_cow_head(skb, 0);
2286 if (err < 0)
2287 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002288
Alexander Duyckc7770192016-01-24 21:16:35 -08002289 ip.hdr = skb_network_header(skb);
2290 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002291
Alexander Duyckc7770192016-01-24 21:16:35 -08002292 /* initialize outer IP header fields */
2293 if (ip.v4->version == 4) {
2294 ip.v4->tot_len = 0;
2295 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002296 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002297 ip.v6->payload_len = 0;
2298 }
2299
Alexander Duyck577389a2016-04-02 00:06:56 -07002300 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002301 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002302 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002303 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07002304 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002305 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002306 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2307 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2308 l4.udp->len = 0;
2309
Alexander Duyck54532052016-01-24 21:17:29 -08002310 /* determine offset of outer transport header */
2311 l4_offset = l4.hdr - skb->data;
2312
2313 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002314 paylen = skb->len - l4_offset;
2315 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002316 }
2317
Alexander Duyckc7770192016-01-24 21:16:35 -08002318 /* reset pointers to inner headers */
2319 ip.hdr = skb_inner_network_header(skb);
2320 l4.hdr = skb_inner_transport_header(skb);
2321
2322 /* initialize inner IP header fields */
2323 if (ip.v4->version == 4) {
2324 ip.v4->tot_len = 0;
2325 ip.v4->check = 0;
2326 } else {
2327 ip.v6->payload_len = 0;
2328 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002329 }
2330
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002331 /* determine offset of inner transport header */
2332 l4_offset = l4.hdr - skb->data;
2333
2334 /* remove payload length from inner checksum */
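	/* (the stack seeds the pseudo-header checksum with the full payload
	 * length; the hardware re-adds the correct length for each segment
	 * it generates)
	 */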
Alexander Duyck24d41e52016-03-18 16:06:47 -07002335 paylen = skb->len - l4_offset;
2336 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002337
2338 /* compute length of segmentation header */
2339 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002340
2341 /* find the field values */
2342 cd_cmd = I40E_TX_CTX_DESC_TSO;
2343 cd_tso_len = skb->len - *hdr_len;
2344 cd_mss = skb_shinfo(skb)->gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002345 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2346 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2347 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002348 return 1;
2349}
2350
2351/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002352 * i40e_tsyn - set up the tsyn context descriptor
2353 * @tx_ring: ptr to the ring to send
2354 * @skb: ptr to the skb we're sending
2355 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002356 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002357 *
2358 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2359 **/
2360static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2361 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2362{
2363 struct i40e_pf *pf;
2364
2365 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2366 return 0;
2367
2368 /* Tx timestamps cannot be sampled when doing TSO */
2369 if (tx_flags & I40E_TX_FLAGS_TSO)
2370 return 0;
2371
2372 /* only timestamp the outbound packet if the user has requested it and
2373 * we are not already transmitting a packet to be timestamped
2374 */
2375 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002376 if (!(pf->flags & I40E_FLAG_PTP))
2377 return 0;
2378
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002379 if (pf->ptp_tx &&
2380 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002381 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2382 pf->ptp_tx_skb = skb_get(skb);
2383 } else {
2384 return 0;
2385 }
2386
2387 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2388 I40E_TXD_CTX_QW1_CMD_SHIFT;
2389
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002390 return 1;
2391}
2392
2393/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002394 * i40e_tx_enable_csum - Enable Tx checksum offloads
2395 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002396 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002397 * @td_cmd: Tx descriptor command bits to set
2398 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06002399 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002400 * @cd_tunneling: ptr to context desc bits
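 *
 * Returns a negative value if the checksum of a TSO frame cannot be
 * offloaded, 0 if the checksum is not offloaded (either none was requested
 * or it was computed in software), or 1 if the descriptor offload fields
 * were set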
2401 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08002402static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2403 u32 *td_cmd, u32 *td_offset,
2404 struct i40e_ring *tx_ring,
2405 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002406{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002407 union {
2408 struct iphdr *v4;
2409 struct ipv6hdr *v6;
2410 unsigned char *hdr;
2411 } ip;
2412 union {
2413 struct tcphdr *tcp;
2414 struct udphdr *udp;
2415 unsigned char *hdr;
2416 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002417 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002418 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002419 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002420 u8 l4_proto = 0;
2421
Alexander Duyck529f1f62016-01-24 21:17:10 -08002422 if (skb->ip_summed != CHECKSUM_PARTIAL)
2423 return 0;
2424
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002425 ip.hdr = skb_network_header(skb);
2426 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002427
Alexander Duyck475b4202016-01-24 21:17:01 -08002428 /* compute outer L2 header size */
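	/* (MACLEN is programmed in 2-byte words, hence the division by 2;
	 * the IPLEN and L4LEN fields computed below are in 4-byte words)
	 */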
2429 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2430
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002431 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002432 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08002433 /* define outer network header type */
2434 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002435 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2436 I40E_TX_CTX_EXT_IP_IPV4 :
2437 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2438
Alexander Duycka0064722016-01-24 21:16:48 -08002439 l4_proto = ip.v4->protocol;
2440 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002441 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002442
2443 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08002444 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002445 if (l4.hdr != exthdr)
2446 ipv6_skip_exthdr(skb, exthdr - skb->data,
2447 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08002448 }
2449
2450 /* define outer transport */
2451 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002452 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08002453 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002454 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002455 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002456 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08002457 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08002458 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002459 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07002460 case IPPROTO_IPIP:
2461 case IPPROTO_IPV6:
2462 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2463 l4.hdr = skb_inner_network_header(skb);
2464 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002465 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002466 if (*tx_flags & I40E_TX_FLAGS_TSO)
2467 return -1;
2468
2469 skb_checksum_help(skb);
2470 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002471 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002472
Alexander Duyck577389a2016-04-02 00:06:56 -07002473 /* compute outer L3 header size */
2474 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2475 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2476
2477 /* switch IP header pointer from outer to inner header */
2478 ip.hdr = skb_inner_network_header(skb);
2479
Alexander Duyck475b4202016-01-24 21:17:01 -08002480 /* compute tunnel header size */
2481 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2482 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2483
Alexander Duyck54532052016-01-24 21:17:29 -08002484 /* indicate if we need to offload outer UDP header */
2485 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002486 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08002487 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2488 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2489
Alexander Duyck475b4202016-01-24 21:17:01 -08002490 /* record tunnel offload values */
2491 *cd_tunneling |= tunnel;
2492
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002493 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002494 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08002495 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002496
Alexander Duycka0064722016-01-24 21:16:48 -08002497 /* reset type as we transition from outer to inner headers */
2498 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2499 if (ip.v4->version == 4)
2500 *tx_flags |= I40E_TX_FLAGS_IPV4;
2501 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002502 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002503 }
2504
2505 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002506 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002507 l4_proto = ip.v4->protocol;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002508		/* the stack computes the IP header checksum already; the only time we
2509 * need the hardware to recompute it is in the case of TSO.
2510 */
Alexander Duyck475b4202016-01-24 21:17:01 -08002511 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2512 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2513 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002514 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002515 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002516
2517 exthdr = ip.hdr + sizeof(*ip.v6);
2518 l4_proto = ip.v6->nexthdr;
2519 if (l4.hdr != exthdr)
2520 ipv6_skip_exthdr(skb, exthdr - skb->data,
2521 &l4_proto, &frag_off);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002522 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002523
Alexander Duyck475b4202016-01-24 21:17:01 -08002524 /* compute inner L3 header size */
2525 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002526
2527 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002528 switch (l4_proto) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002529 case IPPROTO_TCP:
2530 /* enable checksum offloads */
Alexander Duyck475b4202016-01-24 21:17:01 -08002531 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2532 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002533 break;
2534 case IPPROTO_SCTP:
2535 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002536 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2537 offset |= (sizeof(struct sctphdr) >> 2) <<
2538 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002539 break;
2540 case IPPROTO_UDP:
2541 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002542 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2543 offset |= (sizeof(struct udphdr) >> 2) <<
2544 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002545 break;
2546 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002547 if (*tx_flags & I40E_TX_FLAGS_TSO)
2548 return -1;
2549 skb_checksum_help(skb);
2550 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002551 }
Alexander Duyck475b4202016-01-24 21:17:01 -08002552
2553 *td_cmd |= cmd;
2554 *td_offset |= offset;
Alexander Duyck529f1f62016-01-24 21:17:10 -08002555
2556 return 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002557}
2558
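/* Illustration only (not part of the driver): for a plain, non-tunneled
 * TCP/IPv4 packet with a 20-byte IP header and a 20-byte TCP header, the
 * checksum path above contributes the following to the data descriptor,
 * using only the macros already referenced in this function:
 *
 *	offset |= (20 / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;      (IPLEN = 5 words)
 *	offset |= (20 / 4) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;  (L4LEN = 5 words)
 *	cmd    |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM |   (TSO case: HW redoes IP csum)
 *		  I40E_TX_DESC_CMD_L4T_EOFT_TCP;      (TCP checksum offload)
 *
 * Both length fields are expressed in 4-byte words, which is why the TCP
 * case can use l4.tcp->doff directly and the SCTP/UDP cases shift the
 * header size right by 2.
 */
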
2559/**
2560 * i40e_create_tx_ctx - Build the Tx context descriptor
2561 * @tx_ring: ring to create the descriptor on
2562 * @cd_type_cmd_tso_mss: Quad Word 1
2563 * @cd_tunneling: Quad Word 0 - bits 0-31
2564 * @cd_l2tag2: Quad Word 0 - bits 32-63
2565 **/
2566static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2567 const u64 cd_type_cmd_tso_mss,
2568 const u32 cd_tunneling, const u32 cd_l2tag2)
2569{
2570 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002571 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002572
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002573 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2574 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002575 return;
2576
2577 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002578 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2579
2580 i++;
2581 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002582
2583 /* cpu_to_le32 and assign to struct fields */
2584 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2585 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002586 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002587 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2588}
2589
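/* Illustration only: the cd_type_cmd_tso_mss value consumed here is built by
 * the TSO path before this function runs. Assuming the context-descriptor
 * QW1 macros from i40e_type.h (I40E_TX_CTX_DESC_TSO and the
 * I40E_TXD_CTX_QW1_{CMD,TSO_LEN,MSS}_SHIFT shifts), a TSO context value is
 * roughly:
 *
 *	cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT |
 *		((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
 *		((u64)(skb->len - hdr_len) << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
 *		((u64)gso_size << I40E_TXD_CTX_QW1_MSS_SHIFT);
 *
 * That is why the early-out above skips writing a descriptor when the value
 * is still the bare I40E_TX_DESC_DTYPE_CONTEXT and there is no tunneling
 * data and no second VLAN tag to report.
 */
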
2590/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002591 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2592 * @tx_ring: the ring to be checked
2593 * @size: the number of descriptors we want to assure is available
2594 *
2595 * Returns -EBUSY if a stop is needed, else 0
2596 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002597int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002598{
2599 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2600 /* Memory barrier before checking head and tail */
2601 smp_mb();
2602
2603 /* Check again in case another CPU has just made room available. */
2604 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2605 return -EBUSY;
2606
2607 /* A reprieve! - use start_queue because it doesn't call schedule */
2608 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2609 ++tx_ring->tx_stats.restart_queue;
2610 return 0;
2611}
2612
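/* For context: the first-level check, i40e_maybe_stop_tx(), is a static
 * inline in i40e_txrx.h and only drops into the slow path above when the
 * ring looks full. Sketch of that wrapper (not a verbatim copy):
 *
 *	static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 *	{
 *		if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
 *			return 0;
 *		return __i40e_maybe_stop_tx(tx_ring, size);
 *	}
 *
 * The smp_mb() above makes the stopped state visible before the free count
 * is re-read, so a completion that frees descriptors at the same moment is
 * not lost: either we see the room and restart here, or the cleanup path
 * sees the stopped queue and wakes it.
 */
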
2613/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002614 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00002615 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00002616 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002617 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2618 * and so we need to figure out the cases where we need to linearize the skb.
2619 *
2620 * For TSO we need to count the TSO header and segment payload separately.
2621 * As such we need to check cases where we have 7 fragments or more as we
2622 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2623 * the segment payload in the first descriptor, and another 7 for the
2624 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00002625 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08002626bool __i40e_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00002627{
Alexander Duyck2d374902016-02-17 11:02:50 -08002628 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002629 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00002630
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002631 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08002632 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002633 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08002634 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002635
Alexander Duyck2d374902016-02-17 11:02:50 -08002636 /* We need to walk through the list and validate that each group
Alexander Duyck841493a2016-09-06 18:05:04 -07002637 * of 6 fragments totals at least gso_size.
Alexander Duyck2d374902016-02-17 11:02:50 -08002638 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002639 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08002640 frag = &skb_shinfo(skb)->frags[0];
2641
2642 /* Initialize size to the negative value of gso_size minus 1. We
2643 * use this as the worst case scenario in which the frag ahead
2644 * of us only provides one byte which is why we are limited to 6
2645 * descriptors for a single transmit as the header and previous
2646 * fragment are already consuming 2 descriptors.
2647 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002648 sum = 1 - skb_shinfo(skb)->gso_size;
Alexander Duyck2d374902016-02-17 11:02:50 -08002649
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002650 /* Add size of frags 0 through 4 to create our initial sum */
2651 sum += skb_frag_size(frag++);
2652 sum += skb_frag_size(frag++);
2653 sum += skb_frag_size(frag++);
2654 sum += skb_frag_size(frag++);
2655 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002656
2657 /* Walk through fragments adding latest fragment, testing it, and
2658 * then removing stale fragments from the sum.
2659 */
2660 stale = &skb_shinfo(skb)->frags[0];
2661 for (;;) {
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002662 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002663
2664 /* if sum is negative we failed to make sufficient progress */
2665 if (sum < 0)
2666 return true;
2667
Alexander Duyck841493a2016-09-06 18:05:04 -07002668 if (!nr_frags--)
Alexander Duyck2d374902016-02-17 11:02:50 -08002669 break;
2670
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002671 sum -= skb_frag_size(stale++);
Anjali Singhai71da6192015-02-21 06:42:35 +00002672 }
2673
Alexander Duyck2d374902016-02-17 11:02:50 -08002674 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002675}
2676
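/* Worked example with illustrative numbers: gso_size = 7000 and seven
 * 1000-byte fragments. The first window checked after the initial adds is
 *
 *	sum = 1 - 7000 + (6 * 1000) = -999
 *
 * which is negative, so the skb is linearized; a 7000-byte segment cannot
 * be carved out of any six consecutive fragments here without exceeding
 * the 8-buffer limit. With gso_size = 6000 and the same fragments, every
 * window evaluates to sum = 1, so the skb is transmitted as-is.
 */
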
2677/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002678 * i40e_tx_map - Build the Tx descriptor
2679 * @tx_ring: ring to send buffer on
2680 * @skb: send buffer
2681 * @first: first buffer info buffer to use
2682 * @tx_flags: collected send information
2683 * @hdr_len: size of the packet header
2684 * @td_cmd: the command field in the descriptor
2685 * @td_offset: offset for checksum or crc
2686 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002687#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002688inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002689 struct i40e_tx_buffer *first, u32 tx_flags,
2690 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002691#else
2692static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2693 struct i40e_tx_buffer *first, u32 tx_flags,
2694 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002695#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002696{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002697 unsigned int data_len = skb->data_len;
2698 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002699 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002700 struct i40e_tx_buffer *tx_bi;
2701 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002702 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002703 u32 td_tag = 0;
2704 dma_addr_t dma;
2705 u16 gso_segs;
Anjali Singhai58044742015-09-25 18:26:13 -07002706 u16 desc_count = 0;
2707 bool tail_bump = true;
2708 bool do_rs = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002709
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002710 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2711 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2712 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2713 I40E_TX_FLAGS_VLAN_SHIFT;
2714 }
2715
Alexander Duycka5e9c572013-09-28 06:00:27 +00002716 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2717 gso_segs = skb_shinfo(skb)->gso_segs;
2718 else
2719 gso_segs = 1;
2720
2721 /* multiply data chunks by size of headers */
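	/* e.g. with illustrative numbers: hdr_len = 66, skb->len = 2866 and
	 * gso_size = 1400 gives gso_segs = 2, so bytecount becomes
	 * 2866 - 66 + 2 * 66 = 2932, i.e. the 2800 bytes of payload plus one
	 * replicated header per segment actually placed on the wire.
	 */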
2722 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2723 first->gso_segs = gso_segs;
2724 first->skb = skb;
2725 first->tx_flags = tx_flags;
2726
2727 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2728
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002729 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002730 tx_bi = first;
2731
2732 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002733 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2734
Alexander Duycka5e9c572013-09-28 06:00:27 +00002735 if (dma_mapping_error(tx_ring->dev, dma))
2736 goto dma_error;
2737
2738 /* record length, and DMA address */
2739 dma_unmap_len_set(tx_bi, len, size);
2740 dma_unmap_addr_set(tx_bi, dma, dma);
2741
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002742 /* align size to end of page */
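		/* -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the distance from
		 * dma to the next 4K boundary, so the first chunk of this
		 * buffer ends read-request aligned. Illustrative address,
		 * assuming I40E_MAX_DATA_PER_TXD_ALIGNED is 12288 as defined
		 * in i40e_txrx.h: a dma ending in 0x1300 adds 0xd00 (3328),
		 * giving max_data = 12288 + 3328 = 15616, still within the
		 * 16383-byte per-descriptor limit, and each later 12288-byte
		 * chunk also ends 4K aligned.
		 */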
2743 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002744 tx_desc->buffer_addr = cpu_to_le64(dma);
2745
2746 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002747 tx_desc->cmd_type_offset_bsz =
2748 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002749 max_data, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002750
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002751 tx_desc++;
2752 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002753 desc_count++;
2754
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002755 if (i == tx_ring->count) {
2756 tx_desc = I40E_TX_DESC(tx_ring, 0);
2757 i = 0;
2758 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002759
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002760 dma += max_data;
2761 size -= max_data;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002762
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002763 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002764 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002765 }
2766
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002767 if (likely(!data_len))
2768 break;
2769
Alexander Duycka5e9c572013-09-28 06:00:27 +00002770 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2771 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002772
2773 tx_desc++;
2774 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002775 desc_count++;
2776
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002777 if (i == tx_ring->count) {
2778 tx_desc = I40E_TX_DESC(tx_ring, 0);
2779 i = 0;
2780 }
2781
Alexander Duycka5e9c572013-09-28 06:00:27 +00002782 size = skb_frag_size(frag);
2783 data_len -= size;
2784
2785 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2786 DMA_TO_DEVICE);
2787
2788 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002789 }
2790
Alexander Duycka5e9c572013-09-28 06:00:27 +00002791 /* set next_to_watch value indicating a packet is present */
2792 first->next_to_watch = tx_desc;
2793
2794 i++;
2795 if (i == tx_ring->count)
2796 i = 0;
2797
2798 tx_ring->next_to_use = i;
2799
Alexander Duycke486bdf2016-09-12 14:18:40 -07002800 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
Eric Dumazet4567dc12014-10-07 13:30:23 -07002801 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07002802
2803 /* Algorithm to optimize tail and RS bit setting:
2804 * if xmit_more is supported
2805 * if xmit_more is true
2806 * do not update tail and do not mark RS bit.
2807 * if xmit_more is false and last xmit_more was false
2808 * if every packet spanned less than 4 desc
2809 * then set RS bit on 4th packet and update tail
2810 * on every packet
2811 * else
2812 * update tail and set RS bit on every packet.
2813 * if xmit_more is false and last_xmit_more was true
2814 * update tail and set RS bit.
2815 *
2816 * Optimization: wmb to be issued only in case of tail update.
2817 * Also optimize the Descriptor WB path for RS bit with the same
2818 * algorithm.
2819 *
2820 * Note: If there are fewer than 4 packets
2821 * pending and interrupts were disabled the service task will
2822 * trigger a force WB.
2823 */
2824 if (skb->xmit_more &&
Alexander Duycke486bdf2016-09-12 14:18:40 -07002825 !netif_xmit_stopped(txring_txq(tx_ring))) {
Anjali Singhai58044742015-09-25 18:26:13 -07002826 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2827 tail_bump = false;
2828 } else if (!skb->xmit_more &&
Alexander Duycke486bdf2016-09-12 14:18:40 -07002829 !netif_xmit_stopped(txring_txq(tx_ring)) &&
Anjali Singhai58044742015-09-25 18:26:13 -07002830 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2831 (tx_ring->packet_stride < WB_STRIDE) &&
2832 (desc_count < WB_STRIDE)) {
2833 tx_ring->packet_stride++;
2834 } else {
2835 tx_ring->packet_stride = 0;
2836 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2837 do_rs = true;
2838 }
2839 if (do_rs)
2840 tx_ring->packet_stride = 0;
2841
2842 tx_desc->cmd_type_offset_bsz =
2843 build_ctob(td_cmd, td_offset, size, td_tag) |
2844 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2845 I40E_TX_DESC_CMD_EOP) <<
2846 I40E_TXD_QW1_CMD_SHIFT);
2847
Alexander Duycka5e9c572013-09-28 06:00:27 +00002848 /* notify HW of packet */
Carolyn Wybornyffeac832016-08-04 11:37:03 -07002849 if (!tail_bump) {
Jesse Brandeburg489ce7a2015-04-27 14:57:08 -04002850 prefetchw(tx_desc + 1);
Carolyn Wybornyffeac832016-08-04 11:37:03 -07002851 } else {
Anjali Singhai58044742015-09-25 18:26:13 -07002852 /* Force memory writes to complete before letting h/w
2853 * know there are new descriptors to fetch. (Only
2854 * applicable for weak-ordered memory model archs,
2855 * such as IA-64).
2856 */
2857 wmb();
2858 writel(i, tx_ring->tail);
2859 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002860 return;
2861
2862dma_error:
Alexander Duycka5e9c572013-09-28 06:00:27 +00002863 dev_info(tx_ring->dev, "TX DMA map failed\n");
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002864
2865 /* clear dma mappings for failed tx_bi map */
2866 for (;;) {
2867 tx_bi = &tx_ring->tx_bi[i];
Alexander Duycka5e9c572013-09-28 06:00:27 +00002868 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002869 if (tx_bi == first)
2870 break;
2871 if (i == 0)
2872 i = tx_ring->count;
2873 i--;
2874 }
2875
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002876 tx_ring->next_to_use = i;
2877}
2878
2879/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002880 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2881 * @skb: send buffer
2882 * @tx_ring: ring to send buffer on
2883 *
2884 * Returns NETDEV_TX_OK if sent, else an error code
2885 **/
2886static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2887 struct i40e_ring *tx_ring)
2888{
2889 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2890 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2891 struct i40e_tx_buffer *first;
2892 u32 td_offset = 0;
2893 u32 tx_flags = 0;
2894 __be16 protocol;
2895 u32 td_cmd = 0;
2896 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002897 int tso, count;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002898 int tsyn;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002899
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04002900 /* prefetch the data, we'll need it later */
2901 prefetch(skb->data);
2902
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002903 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08002904 if (i40e_chk_linearize(skb, count)) {
2905 if (__skb_linearize(skb))
2906 goto out_drop;
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002907 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08002908 tx_ring->tx_stats.tx_linearize++;
2909 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002910
2911 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2912 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2913 * + 4 desc gap to avoid the cache line where head is,
2914 * + 1 desc for context descriptor,
2915 * otherwise try next time
2916 */
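	/* Worked example with illustrative sizes: a TSO skb with a 400-byte
	 * linear head and 16 page-sized fragments, each small enough for a
	 * single descriptor, gives count = 17, so the ring needs at least
	 * 17 + 4 + 1 = 22 free descriptors before we commit to mapping it.
	 */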
2917 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2918 tx_ring->tx_stats.tx_busy++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002919 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002920 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002921
2922 /* prepare the xmit flags */
2923 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2924 goto out_drop;
2925
2926 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04002927 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002928
2929 /* record the location of the first descriptor for this packet */
2930 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2931
2932 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002933 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002934 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002935 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002936 tx_flags |= I40E_TX_FLAGS_IPV6;
2937
Jesse Brandeburg84b079922016-04-01 03:56:05 -07002938 tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002939
2940 if (tso < 0)
2941 goto out_drop;
2942 else if (tso)
2943 tx_flags |= I40E_TX_FLAGS_TSO;
2944
Alexander Duyck3bc67972016-02-17 11:02:56 -08002945 /* Always offload the checksum, since it's in the data descriptor */
2946 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2947 tx_ring, &cd_tunneling);
2948 if (tso < 0)
2949 goto out_drop;
2950
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002951 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2952
2953 if (tsyn)
2954 tx_flags |= I40E_TX_FLAGS_TSYN;
2955
Jakub Kicinski259afec2014-03-15 14:55:37 +00002956 skb_tx_timestamp(skb);
2957
Alexander Duyckb1941302013-09-28 06:00:32 +00002958 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002959 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2960
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002961 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2962 cd_tunneling, cd_l2tag2);
2963
2964 /* Add Flow Director ATR if it's enabled.
2965 *
2966 * NOTE: this must always be directly before the data descriptor.
2967 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002968 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002969
2970 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2971 td_cmd, td_offset);
2972
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002973 return NETDEV_TX_OK;
2974
2975out_drop:
2976 dev_kfree_skb_any(skb);
2977 return NETDEV_TX_OK;
2978}
2979
2980/**
2981 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2982 * @skb: send buffer
2983 * @netdev: network interface device structure
2984 *
2985 * Returns NETDEV_TX_OK if sent, else an error code
2986 **/
2987netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2988{
2989 struct i40e_netdev_priv *np = netdev_priv(netdev);
2990 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e152013-09-28 06:00:58 +00002991 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002992
2993 /* hardware can't handle really short frames, hardware padding works
2994 * beyond this point
2995 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08002996 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2997 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002998
2999 return i40e_xmit_frame_ring(skb, tx_ring);
3000}