/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
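
/* Usage sketch (illustrative only; the buffer size and flag choice are
 * hypothetical): build_ctob() packs the second quad-word of a Tx data
 * descriptor. Terminating a single 256-byte buffer and requesting a
 * completion report, with no offload offsets and no VLAN tag, would be:
 *
 *	tx_desc->cmd_type_offset_bsz =
 *		build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS,
 *			   0, 256, 0);
 *
 * i.e. DTYPE_DATA plus the command bits, a zero offset field, the buffer
 * size, and a zero L2TAG1.
 */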

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter spec to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			goto unsupported_flow;
		}
		break;
	default:
unsupported_flow:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
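
/* Caller sketch (hypothetical values; field names are taken from the
 * driver's struct i40e_fdir_filter as used above): the ethtool ntuple
 * path fills in a filter spec and hands it to i40e_add_del_fdir(). On
 * success, the raw packet buffer allocated by the helpers above is
 * marked I40E_TX_FLAGS_FD_SB and freed later by i40e_clean_tx_ring().
 * Steering TCP traffic destined to 192.168.0.1:80 might look like:
 *
 *	struct i40e_fdir_filter input = {
 *		.flow_type = TCP_V4_FLOW,
 *		.dst_ip[0] = cpu_to_be32(0xc0a80001),
 *		.dst_port  = cpu_to_be16(80),
 *		.fd_id     = 42,
 *	};
 *	int err = i40e_add_del_fdir(vsi, &input, true);
 */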

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
593
Jesse Brandeburga68de582015-02-24 05:26:03 +0000594/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000595 * i40e_get_tx_pending - how many tx descriptors not processed
596 * @tx_ring: the ring of descriptors
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800597 * @in_sw: is tx_pending being checked in SW or HW
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000598 *
599 * Since there is no access to the ring head register
600 * in XL710, we need to use our local copies
601 **/
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800602u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000603{
Jesse Brandeburga68de582015-02-24 05:26:03 +0000604 u32 head, tail;
605
Anjali Singhai Jaindd353102016-01-15 14:33:12 -0800606 if (!in_sw)
607 head = i40e_get_head(ring);
608 else
609 head = ring->next_to_clean;
Jesse Brandeburga68de582015-02-24 05:26:03 +0000610 tail = readl(ring->tail);
611
612 if (head != tail)
613 return (head < tail) ?
614 tail - head : (tail + ring->count - head);
615
616 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +0000617}
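
/* Worked example (hypothetical numbers): with ring->count = 512,
 * head = 500 and tail = 20, the outstanding descriptors wrap past the
 * end of the ring, so pending = tail + count - head =
 * 20 + 512 - 500 = 32. Without a wrap (head = 100, tail = 132) it is
 * simply tail - head = 32.
 */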

#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttle rate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
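
/* Worked example (hypothetical traffic; assumes I40E_ITR_20K encodes 25
 * two-usec units and ITR_COUNTDOWN_START is 100, as defined in the
 * driver headers): with rc->itr = I40E_ITR_20K,
 * usecs = (25 << 1) * 100 = 5000. If 250000 bytes arrived in that
 * window, bytes_per_int = 250000 / 5000 = 50, i.e. roughly 50 MB/s.
 * That exceeds the 20 MB/s threshold, so a ring in I40E_LOW_LATENCY
 * moves to I40E_BULK_LATENCY and new_itr becomes I40E_ITR_18K.
 */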

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (!rx_bi->page)
			continue;

		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_bi->page, 0);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
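
/* Caller sketch (illustrative; I40E_RX_BUFFER_WRITE is the driver's
 * refill threshold): the Rx clean loop batches refills rather than
 * replenishing one descriptor at a time:
 *
 *	if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
 *		failure = failure ||
 *			  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
 *		cleaned_count = 0;
 *	}
 *
 * The boolean return feeds the poll routine's decision to come back
 * and retry after an allocation failure.
 */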
1250
1251/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001252 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1253 * @vsi: the VSI we care about
1254 * @skb: skb currently being received and modified
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001255 * @rx_desc: the receive descriptor
1256 *
1257 * skb->protocol must be set before this function is called
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001258 **/
1259static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1260 struct sk_buff *skb,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001261 union i40e_rx_desc *rx_desc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001262{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001263 struct i40e_rx_ptype_decoded decoded;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001264 u32 rx_error, rx_status;
Alexander Duyck858296c82016-06-14 15:45:42 -07001265 bool ipv4, ipv6;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001266 u8 ptype;
1267 u64 qword;
1268
1269 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1270 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1271 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1272 I40E_RXD_QW1_ERROR_SHIFT;
1273 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1274 I40E_RXD_QW1_STATUS_SHIFT;
1275 decoded = decode_rx_desc_ptype(ptype);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001276
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001277 skb->ip_summed = CHECKSUM_NONE;
1278
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001279 skb_checksum_none_assert(skb);
1280
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001281 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001282 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001283 return;
1284
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001285 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001286 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001287 return;
1288
1289 /* both known and outer_ip must be set for the below code to work */
1290 if (!(decoded.known && decoded.outer_ip))
1291 return;
1292
Alexander Duyckfad57332016-01-24 21:17:22 -08001293 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1294 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1295 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1296 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001297
1298 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001299 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1300 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001301 goto checksum_fail;
1302
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001303 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001304 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001305 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001306 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001307 return;
1308
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001309 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001310 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001311 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001312
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001313 /* handle packets that were not able to be checksummed due
1314 * to arrival speed, in this case the stack can compute
1315 * the csum.
1316 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001317 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001318 return;
1319
Alexander Duyck858296c82016-06-14 15:45:42 -07001320 /* If there is an outer header present that might contain a checksum
1321 * we need to bump the checksum level by 1 to reflect the fact that
1322 * we are indicating we validated the inner checksum.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001323 */
Alexander Duyck858296c82016-06-14 15:45:42 -07001324 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1325 skb->csum_level = 1;
Alexander Duyckfad57332016-01-24 21:17:22 -08001326
Alexander Duyck858296c82016-06-14 15:45:42 -07001327 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1328 switch (decoded.inner_prot) {
1329 case I40E_RX_PTYPE_INNER_PROT_TCP:
1330 case I40E_RX_PTYPE_INNER_PROT_UDP:
1331 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1332 skb->ip_summed = CHECKSUM_UNNECESSARY;
1333 /* fall through */
1334 default:
1335 break;
1336 }
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001337
1338 return;
1339
1340checksum_fail:
1341 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001342}
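
/* Editor's note (illustrative, not driver text): two concrete outcomes of
 * i40e_rx_checksum() above.  A plain TCP/IPv4 packet that hardware
 * validated reaches the stack with skb->ip_summed == CHECKSUM_UNNECESSARY
 * and csum_level 0; a VXLAN-encapsulated TCP packet (tunnel_type >=
 * I40E_RX_PTYPE_TUNNEL_IP_GRENAT) gets csum_level 1, telling the stack the
 * validated checksum was the inner one.  An IPE/EIPE or L4E error bit
 * instead leaves CHECKSUM_NONE and bumps hw_csum_rx_error.
 */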
1343
1344/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001345 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001346 * @ptype: the ptype value from the descriptor
1347 *
1348 * Returns a hash type to be used by skb_set_hash
1349 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001350static inline int i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001351{
1352 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1353
1354 if (!decoded.known)
1355 return PKT_HASH_TYPE_NONE;
1356
1357 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1358 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1359 return PKT_HASH_TYPE_L4;
1360 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1361 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1362 return PKT_HASH_TYPE_L3;
1363 else
1364 return PKT_HASH_TYPE_L2;
1365}
1366
1367/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001368 * i40e_rx_hash - set the hash value in the skb
1369 * @ring: descriptor ring
1370 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
1371 **/
1372static inline void i40e_rx_hash(struct i40e_ring *ring,
1373 union i40e_rx_desc *rx_desc,
1374 struct sk_buff *skb,
1375 u8 rx_ptype)
1376{
1377 u32 hash;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001378 const __le64 rss_mask =
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001379 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1380 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1381
Mitch Williamsa876c3b2016-05-03 15:13:18 -07001382 if (!(ring->netdev->features & NETIF_F_RXHASH))
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001383 return;
1384
1385 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1386 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1387 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1388 }
1389}
1390
1391/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001392 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1393 * @rx_ring: rx descriptor ring packet is being transacted on
1394 * @rx_desc: pointer to the EOP Rx descriptor
1395 * @skb: pointer to current skb being populated
1396 * @rx_ptype: the packet type decoded by hardware
Mitch Williamsa132af22015-01-24 09:58:35 +00001397 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001398 * This function checks the ring, descriptor, and packet information in
1399 * order to populate the hash, checksum, VLAN, protocol, and
1400 * other fields within the skb.
Mitch Williamsa132af22015-01-24 09:58:35 +00001401 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001402static inline
1403void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1404 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1405 u8 rx_ptype)
1406{
1407 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1408 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1409 I40E_RXD_QW1_STATUS_SHIFT;
Jacob Keller144ed172016-10-05 09:30:42 -07001410 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1411 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001412 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1413
Jacob Keller12490502016-10-05 09:30:44 -07001414 if (unlikely(tsynvalid))
Jacob Keller144ed172016-10-05 09:30:42 -07001415 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001416
1417 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1418
1419 /* modifies the skb - consumes the enet header */
1420 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1421
1422 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1423
1424 skb_record_rx_queue(skb, rx_ring->queue_index);
1425}
1426
1427/**
1428 * i40e_pull_tail - i40e specific version of skb_pull_tail
1429 * @rx_ring: rx descriptor ring packet is being transacted on
1430 * @skb: pointer to current skb being adjusted
1431 *
1432 * This function is an i40e specific version of __pskb_pull_tail. The
1433 * main difference between this version and the original function is that
1434 * this function can make several assumptions about the state of things
1435 * that allow for significant optimizations versus the standard function.
1436 * As a result we can do things like drop a frag and maintain an accurate
1437 * truesize for the skb.
1438 **/
1439static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
1440{
1441 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1442 unsigned char *va;
1443 unsigned int pull_len;
1444
1445 /* it is valid to use page_address instead of kmap since we are
1446 * working with pages allocated out of the lomem pool per
1447 * alloc_page(GFP_ATOMIC)
1448 */
1449 va = skb_frag_address(frag);
1450
1451 /* we need the header to contain the greater of either ETH_HLEN or
1452 * 60 bytes if the skb->len is less than 60 for skb_pad.
1453 */
1454 pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
1455
1456 /* align pull length to size of long to optimize memcpy performance */
1457 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1458
1459 /* update all of the pointers */
1460 skb_frag_size_sub(frag, pull_len);
1461 frag->page_offset += pull_len;
1462 skb->data_len -= pull_len;
1463 skb->tail += pull_len;
1464}
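
/* Editor's note (illustrative): i40e_pull_tail() preserves skb->len -- the
 * pulled bytes only move from the first page fragment into the linear
 * area.  Pulling, say, 60 bytes of a 1514-byte frame leaves skb->len at
 * 1514 while skb->data_len drops by 60, skb->tail advances by 60, and the
 * frag's size and page_offset shrink and grow to match.
 */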
1465
1466/**
1467 * i40e_cleanup_headers - Correct empty headers
1468 * @rx_ring: rx descriptor ring packet is being transacted on
1469 * @skb: pointer to current skb being fixed
1470 *
1471 * Also address the case where we are pulling data in on pages only
1472 * and as such no data is present in the skb header.
1473 *
1474 * In addition if skb is not at least 60 bytes we need to pad it so that
1475 * it is large enough to qualify as a valid Ethernet frame.
1476 *
1477 * Returns true if an error was encountered and skb was freed.
1478 **/
1479static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
1480{
1481 /* place header in linear portion of buffer */
1482 if (skb_is_nonlinear(skb))
1483 i40e_pull_tail(rx_ring, skb);
1484
1485 /* if eth_skb_pad returns an error the skb was freed */
1486 if (eth_skb_pad(skb))
1487 return true;
1488
1489 return false;
1490}
1491
1492/**
1493 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1494 * @rx_ring: rx descriptor ring to store buffers on
1495 * @old_buff: donor buffer to have page reused
1496 *
1497 * Synchronizes page for reuse by the adapter
1498 **/
1499static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1500 struct i40e_rx_buffer *old_buff)
1501{
1502 struct i40e_rx_buffer *new_buff;
1503 u16 nta = rx_ring->next_to_alloc;
1504
1505 new_buff = &rx_ring->rx_bi[nta];
1506
1507 /* update, and store next to alloc */
1508 nta++;
1509 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1510
1511 /* transfer page from old buffer to new buffer */
1512 *new_buff = *old_buff;
1513}
1514
1515/**
1516 * i40e_page_is_reserved - check if reuse is possible
1517 * @page: page struct to check
1518 **/
1519static inline bool i40e_page_is_reserved(struct page *page)
1520{
1521 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1522}
1523
1524/**
1525 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1526 * @rx_ring: rx descriptor ring to transact packets on
1527 * @rx_buffer: buffer containing page to add
Scott Peterson7987dcd2017-02-09 23:37:28 -08001528 * @size: packet length from rx_desc
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001529 * @skb: sk_buff to place the data into
1530 *
1531 * This function will add the data contained in rx_buffer->page to the skb.
1532 * This is done either through a direct copy if the data in the buffer is
1533 * less than the skb header size, otherwise it will just attach the page as
1534 * a frag to the skb.
1535 *
1536 * The function will then update the page offset if necessary and return
1537 * true if the buffer can be reused by the adapter.
1538 **/
1539static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
1540 struct i40e_rx_buffer *rx_buffer,
Scott Peterson7987dcd2017-02-09 23:37:28 -08001541 unsigned int size,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001542 struct sk_buff *skb)
1543{
1544 struct page *page = rx_buffer->page;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001545#if (PAGE_SIZE < 8192)
1546 unsigned int truesize = I40E_RXBUFFER_2048;
1547#else
1548 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1549 unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
1550#endif
1551
1552 /* will the data fit in the skb we allocated? if so, just
1553 * copy it as it is pretty small anyway
1554 */
1555 if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1556 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1557
1558 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1559
1560 /* page is not reserved, we can reuse buffer as-is */
1561 if (likely(!i40e_page_is_reserved(page)))
1562 return true;
1563
1564 /* this page cannot be reused so discard it */
1565 __free_pages(page, 0);
1566 return false;
1567 }
1568
1569 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1570 rx_buffer->page_offset, size, truesize);
1571
1572 /* avoid re-using remote pages */
1573 if (unlikely(i40e_page_is_reserved(page)))
1574 return false;
1575
1576#if (PAGE_SIZE < 8192)
1577 /* if we are only owner of page we can reuse it */
1578 if (unlikely(page_count(page) != 1))
1579 return false;
1580
1581 /* flip page offset to other buffer */
1582 rx_buffer->page_offset ^= truesize;
1583#else
1584 /* move offset up to the next cache line */
1585 rx_buffer->page_offset += truesize;
1586
1587 if (rx_buffer->page_offset > last_offset)
1588 return false;
1589#endif
1590
1591 /* Even if we own the page, we are not allowed to use atomic_set()
1592 * This would break get_page_unless_zero() users.
1593 */
1594 get_page(rx_buffer->page);
1595
1596 return true;
1597}
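
/* Editor's sketch (not driver code): on 4K-page systems the reuse path in
 * i40e_add_rx_frag() toggles page_offset between the two halves of one
 * page, so the half just handed to the stack rests while the other half
 * receives the next packet.  A hypothetical stand-alone demo of the flip:
 */
static inline unsigned int example_flip_half(unsigned int page_offset)
{
	/* XOR with the buffer size toggles offset 0 <-> 2048 */
	return page_offset ^ I40E_RXBUFFER_2048;
}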
1598
1599/**
1600 * i40e_fetch_rx_buffer - Allocate skb and populate it
1601 * @rx_ring: rx descriptor ring to transact packets on
1602 * @rx_desc: descriptor containing info written by hardware
1603 *
1604 * This function allocates an skb on the fly, and populates it with the page
1605 * data from the current receive descriptor, taking care to set up the skb
1606 * correctly, as well as handling calling the page recycle function if
1607 * necessary.
1608 **/
1609static inline
1610struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
1611 union i40e_rx_desc *rx_desc)
1612{
Scott Peterson7987dcd2017-02-09 23:37:28 -08001613 u64 local_status_error_len =
1614 le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1615 unsigned int size =
1616 (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1617 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001618 struct i40e_rx_buffer *rx_buffer;
1619 struct sk_buff *skb;
1620 struct page *page;
1621
1622 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1623 page = rx_buffer->page;
1624 prefetchw(page);
1625
1626 skb = rx_buffer->skb;
1627
1628 if (likely(!skb)) {
1629 void *page_addr = page_address(page) + rx_buffer->page_offset;
1630
1631 /* prefetch first cache line of first page */
1632 prefetch(page_addr);
1633#if L1_CACHE_BYTES < 128
1634 prefetch(page_addr + L1_CACHE_BYTES);
1635#endif
1636
1637 /* allocate a skb to store the frags */
1638 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1639 I40E_RX_HDR_SIZE,
1640 GFP_ATOMIC | __GFP_NOWARN);
1641 if (unlikely(!skb)) {
1642 rx_ring->rx_stats.alloc_buff_failed++;
1643 return NULL;
1644 }
1645
1646 /* we will be copying header into skb->data in
1647 * pskb_may_pull so it is in our interest to prefetch
1648 * it now to avoid a possible cache miss
1649 */
1650 prefetchw(skb->data);
1651 } else {
1652 rx_buffer->skb = NULL;
1653 }
1654
1655 /* we are reusing so sync this buffer for CPU use */
1656 dma_sync_single_range_for_cpu(rx_ring->dev,
1657 rx_buffer->dma,
1658 rx_buffer->page_offset,
Scott Peterson7987dcd2017-02-09 23:37:28 -08001659 size,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001660 DMA_FROM_DEVICE);
1661
1662 /* pull page into skb */
Scott Peterson7987dcd2017-02-09 23:37:28 -08001663 if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001664 /* hand second half of page back to the ring */
1665 i40e_reuse_rx_page(rx_ring, rx_buffer);
1666 rx_ring->rx_stats.page_reuse_count++;
1667 } else {
1668 /* we are not reusing the buffer so unmap it */
1669 dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1670 DMA_FROM_DEVICE);
1671 }
1672
1673 /* clear contents of buffer_info */
1674 rx_buffer->page = NULL;
1675
1676 return skb;
1677}
1678
1679/**
1680 * i40e_is_non_eop - process handling of non-EOP buffers
1681 * @rx_ring: Rx ring being processed
1682 * @rx_desc: Rx descriptor for current buffer
1683 * @skb: Current socket buffer containing buffer in progress
1684 *
1685 * This function updates next to clean. If the buffer is an EOP buffer
1686 * this function exits returning false, otherwise it will place the
1687 * sk_buff in the next buffer to be chained and return true indicating
1688 * that this is in fact a non-EOP buffer.
1689 **/
1690static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1691 union i40e_rx_desc *rx_desc,
1692 struct sk_buff *skb)
1693{
1694 u32 ntc = rx_ring->next_to_clean + 1;
1695
1696 /* fetch, update, and store next to clean */
1697 ntc = (ntc < rx_ring->count) ? ntc : 0;
1698 rx_ring->next_to_clean = ntc;
1699
1700 prefetch(I40E_RX_DESC(rx_ring, ntc));
1701
1702 if (unlikely(i40e_rx_is_programming_status(
1703 le64_to_cpu(rx_desc->wb.qword1.status_error_len)))) {
1704 i40e_clean_programming_status(rx_ring, rx_desc);
1705 rx_ring->rx_bi[ntc].skb = skb;
1706 return true;
1707 }
1708 /* if we are the last buffer then there is nothing else to do */
1709#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1710 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1711 return false;
1712
1713 /* place skb in next buffer to be received */
1714 rx_ring->rx_bi[ntc].skb = skb;
1715 rx_ring->rx_stats.non_eop_descs++;
1716
1717 return true;
1718}
1719
1720/**
1721 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1722 * @rx_ring: rx descriptor ring to transact packets on
1723 * @budget: Total limit on number of packets to process
1724 *
1725 * This function provides a "bounce buffer" approach to Rx interrupt
1726 * processing. The advantage to this is that on systems that have
1727 * expensive overhead for IOMMU access this provides a means of avoiding
1728 * it by maintaining the mapping of the page to the system.
1729 *
1730 * Returns amount of work completed
1731 **/
1732static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
Mitch Williamsa132af22015-01-24 09:58:35 +00001733{
1734 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1735 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001736 bool failure = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001737
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001738 while (likely(total_rx_packets < budget)) {
1739 union i40e_rx_desc *rx_desc;
Mitch Williamsa132af22015-01-24 09:58:35 +00001740 struct sk_buff *skb;
1741 u16 vlan_tag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001742 u8 rx_ptype;
1743 u64 qword;
1744
Mitch Williamsa132af22015-01-24 09:58:35 +00001745 /* return some buffers to hardware, one at a time is too slow */
1746 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001747 failure = failure ||
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001748 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001749 cleaned_count = 0;
1750 }
1751
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001752 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1753
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001754 /* status_error_len will always be zero for unused descriptors
1755 * because it's cleared in cleanup, and overlaps with hdr_addr
1756 * which is always zero because packet split isn't used, if the
1757 * hardware wrote DD then it will be non-zero
1758 */
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001759 if (!i40e_test_staterr(rx_desc,
1760 BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001761 break;
1762
Mitch Williamsa132af22015-01-24 09:58:35 +00001763 /* This memory barrier is needed to keep us from reading
1764 * any other fields out of the rx_desc until we know the
1765 * DD bit is set.
1766 */
Alexander Duyck67317162015-04-08 18:49:43 -07001767 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001768
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001769 skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
1770 if (!skb)
1771 break;
Mitch Williamsa132af22015-01-24 09:58:35 +00001772
Mitch Williamsa132af22015-01-24 09:58:35 +00001773 cleaned_count++;
1774
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001775 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
Mitch Williamsa132af22015-01-24 09:58:35 +00001776 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001777
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001778 /* ERR_MASK will only have valid bits if EOP set, and
1779 * what we are doing here is actually checking
1780 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1781 * the error field
1782 */
1783 if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001784 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001785 continue;
1786 }
1787
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001788 if (i40e_cleanup_headers(rx_ring, skb))
1789 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001790
1791 /* probably a little skewed due to removing CRC */
1792 total_rx_bytes += skb->len;
Mitch Williamsa132af22015-01-24 09:58:35 +00001793
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001794 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1795 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1796 I40E_RXD_QW1_PTYPE_SHIFT;
1797
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001798 /* populate checksum, VLAN, and protocol */
1799 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00001800
Mitch Williamsa132af22015-01-24 09:58:35 +00001801#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001802 if (unlikely(
1803 i40e_rx_is_fcoe(rx_ptype) &&
1804 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001805 dev_kfree_skb_any(skb);
1806 continue;
1807 }
1808#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001809
1810 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1811 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1812
Mitch Williamsa132af22015-01-24 09:58:35 +00001813 i40e_receive_skb(rx_ring, skb, vlan_tag);
1814
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001815 /* update budget accounting */
1816 total_rx_packets++;
1817 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001818
1819 u64_stats_update_begin(&rx_ring->syncp);
1820 rx_ring->stats.packets += total_rx_packets;
1821 rx_ring->stats.bytes += total_rx_bytes;
1822 u64_stats_update_end(&rx_ring->syncp);
1823 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1824 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1825
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001826 /* guarantee a trip back through this routine if there was a failure */
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001827 return failure ? budget : total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001828}
1829
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001830static u32 i40e_buildreg_itr(const int type, const u16 itr)
1831{
1832 u32 val;
1833
1834 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08001835 /* Don't clear PBA because that can cause lost interrupts that
1836 * came in while we were cleaning/polling
1837 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001838 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1839 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1840
1841 return val;
1842}
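
/* Editor's note (illustrative usage, not driver code): a typical adaptive
 * Rx update built with the helper above.  To our reading of the datasheet
 * -- not something stated in this file -- the hardware interprets the
 * interval in 2 usec units, so 62 is roughly 124 usec:
 *
 *	u32 val = i40e_buildreg_itr(I40E_RX_ITR, 62);
 *
 *	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
 */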
1843
1844/* a small macro to shorten up some long lines */
1845#define INTREG I40E_PFINT_DYN_CTLN
Jacob Keller65e87c02016-09-12 14:18:44 -07001846static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
1847{
1848 return !!(vsi->rx_rings[idx]->rx_itr_setting);
1849}
1850
1851static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
1852{
1853 return !!(vsi->tx_rings[idx]->tx_itr_setting);
1854}
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001855
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001856/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001857 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1858 * @vsi: the VSI we care about
1859 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1860 *
1861 **/
1862static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1863 struct i40e_q_vector *q_vector)
1864{
1865 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001866 bool rx = false, tx = false;
1867 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001868 int vector;
Kan Lianga75e8002016-02-19 09:24:04 -05001869 int idx = q_vector->v_idx;
Jacob Keller65e87c02016-09-12 14:18:44 -07001870 int rx_itr_setting, tx_itr_setting;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001871
1872 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001873
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001874 /* avoid dynamic calculation if in countdown mode OR if
1875 * all dynamic is disabled
1876 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001877 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1878
Jacob Keller65e87c02016-09-12 14:18:44 -07001879 rx_itr_setting = get_rx_itr_enabled(vsi, idx);
1880 tx_itr_setting = get_tx_itr_enabled(vsi, idx);
1881
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001882 if (q_vector->itr_countdown > 0 ||
Jacob Keller65e87c02016-09-12 14:18:44 -07001883 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
1884 !ITR_IS_DYNAMIC(tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001885 goto enable_int;
1886 }
1887
Jacob Keller65e87c02016-09-12 14:18:44 -07001888 if (ITR_IS_DYNAMIC(rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001889 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1890 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001891 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001892
Jacob Keller65e87c02016-09-12 14:18:44 -07001893 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001894 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1895 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001896 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001897
1898 if (rx || tx) {
1899 /* get the higher of the two ITR adjustments and
1900 * use the same value for both ITR registers
1901 * when in adaptive mode (Rx and/or Tx)
1902 */
1903 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1904
1905 q_vector->tx.itr = q_vector->rx.itr = itr;
1906 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1907 tx = true;
1908 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1909 rx = true;
1910 }
1911
1912 /* only need to enable the interrupt once, but need
1913 * to possibly update both ITR values
1914 */
1915 if (rx) {
1916 /* set the INTENA_MSK_MASK so that this first write
1917 * won't actually enable the interrupt, instead just
1918 * updating the ITR (it's bit 31 PF and VF)
1919 */
1920 rxval |= BIT(31);
1921 /* don't check _DOWN because interrupt isn't being enabled */
1922 wr32(hw, INTREG(vector - 1), rxval);
1923 }
1924
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001925enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001926 if (!test_bit(__I40E_DOWN, &vsi->state))
1927 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001928
1929 if (q_vector->itr_countdown)
1930 q_vector->itr_countdown--;
1931 else
1932 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001933}
1934
1935/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001936 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1937 * @napi: napi struct with our devices info in it
1938 * @budget: amount of work driver is allowed to do this pass, in packets
1939 *
1940 * This function will clean all queues associated with a q_vector.
1941 *
1942 * Returns the amount of work done
1943 **/
1944int i40e_napi_poll(struct napi_struct *napi, int budget)
1945{
1946 struct i40e_q_vector *q_vector =
1947 container_of(napi, struct i40e_q_vector, napi);
1948 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001949 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001950 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001951 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001952 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001953 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001954
1955 if (test_bit(__I40E_DOWN, &vsi->state)) {
1956 napi_complete(napi);
1957 return 0;
1958 }
1959
Kiran Patil9c6c1252015-11-06 15:26:02 -08001960 /* Clear hung_detected bit */
1961 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001962 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001963 * budget and be more aggressive about cleaning up the Tx descriptors.
1964 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001965 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08001966 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001967 clean_complete = false;
1968 continue;
1969 }
1970 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001971 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001972 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001973
Alexander Duyckc67cace2015-09-24 09:04:26 -07001974 /* Handle case where we are called by netpoll with a budget of 0 */
1975 if (budget <= 0)
1976 goto tx_only;
1977
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001978 /* We attempt to distribute budget to each Rx queue fairly, but don't
1979 * allow the budget to go below 1 because that would exit polling early.
1980 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001981 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001982
Mitch Williamsa132af22015-01-24 09:58:35 +00001983 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001984 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001985
1986 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001987 /* if we clean as many as budgeted, we must not be done */
1988 if (cleaned >= budget_per_ring)
1989 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001990 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001991
1992 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001993 if (!clean_complete) {
Alan Brady96db7762016-09-14 16:24:38 -07001994 const cpumask_t *aff_mask = &q_vector->affinity_mask;
1995 int cpu_id = smp_processor_id();
1996
1997 /* It is possible that the interrupt affinity has changed but,
1998 * if the cpu is pegged at 100%, polling will never exit while
1999 * traffic continues and the interrupt will be stuck on this
2000 * cpu. We check to make sure affinity is correct before we
2001 * continue to poll, otherwise we must stop polling so the
2002 * interrupt can move to the correct cpu.
2003 */
2004 if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
2005 !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07002006tx_only:
Alan Brady96db7762016-09-14 16:24:38 -07002007 if (arm_wb) {
2008 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2009 i40e_enable_wb_on_itr(vsi, q_vector);
2010 }
2011 return budget;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002012 }
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002013 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002014
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002015 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2016 q_vector->arm_wb_state = false;
2017
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002018 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002019 napi_complete_done(napi, work_done);
Alan Brady96db7762016-09-14 16:24:38 -07002020
2021 /* If we're prematurely stopping polling to fix the interrupt
2022 * affinity we want to make sure polling starts back up so we
2023 * issue a call to i40e_force_wb which triggers a SW interrupt.
2024 */
2025 if (!clean_complete)
2026 i40e_force_wb(vsi, q_vector);
2027 else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002028 i40e_irq_dynamic_enable_icr0(vsi->back, false);
Alan Brady96db7762016-09-14 16:24:38 -07002029 else
2030 i40e_update_enable_itr(vsi, q_vector);
2031
Alexander Duyck6beb84a2016-11-08 13:05:16 -08002032 return min(work_done, budget - 1);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002033}
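
/* Editor's note (worked example): with the default NAPI budget of 64 and a
 * q_vector driving 4 ring pairs, budget_per_ring above is
 * max(64 / 4, 1) = 16.  The max() keeps the per-ring share at 1 even when
 * ring pairs outnumber the budget, so the fair split by itself can never
 * hand the Rx clean loop a budget of 0.
 */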
2034
2035/**
2036 * i40e_atr - Add a Flow Director ATR filter
2037 * @tx_ring: ring to add programming descriptor to
2038 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002039 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002040 **/
2041static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002042 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002043{
2044 struct i40e_filter_program_desc *fdir_desc;
2045 struct i40e_pf *pf = tx_ring->vsi->back;
2046 union {
2047 unsigned char *network;
2048 struct iphdr *ipv4;
2049 struct ipv6hdr *ipv6;
2050 } hdr;
2051 struct tcphdr *th;
2052 unsigned int hlen;
2053 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002054 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002055 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002056
2057 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002058 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002059 return;
2060
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002061 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2062 return;
2063
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002064 /* if sampling is disabled do nothing */
2065 if (!tx_ring->atr_sample_rate)
2066 return;
2067
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002068 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002069 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002070 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002071
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002072 /* snag network header to get L4 type and address */
2073 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2074 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002075
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002076 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002077 * tx_enable_csum function if encap is enabled.
2078 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002079 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2080 /* access ihl as u8 to avoid unaligned access on ia64 */
2081 hlen = (hdr.network[0] & 0x0F) << 2;
2082 l4_proto = hdr.ipv4->protocol;
2083 } else {
2084 hlen = hdr.network - skb->data;
2085 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2086 hlen -= hdr.network - skb->data;
2087 }
2088
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002089 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002090 return;
2091
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002092 th = (struct tcphdr *)(hdr.network + hlen);
2093
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002094 /* Due to lack of space, no more new filters can be programmed */
2095 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2096 return;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002097 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2098 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002099 /* HW ATR eviction will take care of removing filters on FIN
2100 * and RST packets.
2101 */
2102 if (th->fin || th->rst)
2103 return;
2104 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002105
2106 tx_ring->atr_count++;
2107
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002108 /* sample on all syn/fin/rst packets or once every atr sample rate */
2109 if (!th->fin &&
2110 !th->syn &&
2111 !th->rst &&
2112 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002113 return;
2114
2115 tx_ring->atr_count = 0;
2116
2117 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002118 i = tx_ring->next_to_use;
2119 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2120
2121 i++;
2122 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002123
2124 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2125 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002126 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002127 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2128 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2129 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2130 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2131
2132 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2133
2134 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2135
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002136 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002137 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2138 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2139 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2140 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2141
2142 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2143 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2144
2145 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2146 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2147
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002148 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002149 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002150 dtype_cmd |=
2151 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2152 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2153 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2154 else
2155 dtype_cmd |=
2156 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2157 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2158 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002159
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002160 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2161 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002162 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2163
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002164 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002165 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002166 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002167 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002168}
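
/* Editor's sketch (not driver code): the sampling cadence of i40e_atr()
 * reduced to a predicate -- every SYN/FIN/RST is sampled, plus one packet
 * per atr_sample_rate otherwise.  Hypothetical helper:
 */
static inline bool example_atr_should_sample(bool syn_fin_rst, u16 atr_count,
					     u16 atr_sample_rate)
{
	/* flag packets are always sampled; others once the counter fills */
	return syn_fin_rst || atr_count >= atr_sample_rate;
}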
2169
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002170/**
2171 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2172 * @skb: send buffer
2173 * @tx_ring: ring to send buffer on
2174 * @flags: the tx flags to be set
2175 *
2176 * Checks the skb and set up correspondingly several generic transmit flags
2177 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2178 *
2179 * Returns an error code to indicate the frame should be dropped upon error,
2180 * otherwise returns 0 to indicate the flags have been set properly.
2181 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002182#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002183inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002184 struct i40e_ring *tx_ring,
2185 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002186#else
2187static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2188 struct i40e_ring *tx_ring,
2189 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002190#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002191{
2192 __be16 protocol = skb->protocol;
2193 u32 tx_flags = 0;
2194
Greg Rose31eaacc2015-03-31 00:45:03 -07002195 if (protocol == htons(ETH_P_8021Q) &&
2196 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2197 /* When HW VLAN acceleration is turned off by the user the
2198 * stack sets the protocol to 8021q so that the driver
2199 * can take any steps required to support the SW only
2200 * VLAN handling. In our case the driver doesn't need
2201 * to take any further steps so just set the protocol
2202 * to the encapsulated ethertype.
2203 */
2204 skb->protocol = vlan_get_protocol(skb);
2205 goto out;
2206 }
2207
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002208 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002209 if (skb_vlan_tag_present(skb)) {
2210 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002211 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2212 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002213 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002214 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002215
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002216 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2217 if (!vhdr)
2218 return -EINVAL;
2219
2220 protocol = vhdr->h_vlan_encapsulated_proto;
2221 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2222 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2223 }
2224
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002225 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2226 goto out;
2227
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002228 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002229 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2230 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002231 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2232 tx_flags |= (skb->priority & 0x7) <<
2233 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2234 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2235 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002236 int rc;
2237
2238 rc = skb_cow_head(skb, 0);
2239 if (rc < 0)
2240 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002241 vhdr = (struct vlan_ethhdr *)skb->data;
2242 vhdr->h_vlan_TCI = htons(tx_flags >>
2243 I40E_TX_FLAGS_VLAN_SHIFT);
2244 } else {
2245 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2246 }
2247 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002248
2249out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002250 *flags = tx_flags;
2251 return 0;
2252}
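
/* Editor's sketch (not driver code): i40e_tx_prepare_vlan_flags() stores
 * the 16-bit 802.1Q TCI in the upper half of the u32 tx_flags word via
 * I40E_TX_FLAGS_VLAN_SHIFT, leaving the low bits for boolean flags.  A
 * hypothetical round-trip, assuming the shift places the tag in the top
 * 16 bits as the masking in this file implies:
 */
static inline u32 example_pack_vlan(u32 tx_flags, u16 tci)
{
	return tx_flags | ((u32)tci << I40E_TX_FLAGS_VLAN_SHIFT);
}

static inline u16 example_unpack_vlan(u32 tx_flags)
{
	return (u16)(tx_flags >> I40E_TX_FLAGS_VLAN_SHIFT);
}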
2253
2254/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002255 * i40e_tso - set up the tso context descriptor
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002256 * @first: pointer to first Tx buffer for xmit
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002257 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002258 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002259 *
2260 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2261 **/
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002262static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2263 u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002264{
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002265 struct sk_buff *skb = first->skb;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002266 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002267 union {
2268 struct iphdr *v4;
2269 struct ipv6hdr *v6;
2270 unsigned char *hdr;
2271 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002272 union {
2273 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002274 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002275 unsigned char *hdr;
2276 } l4;
2277 u32 paylen, l4_offset;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002278 u16 gso_segs, gso_size;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002279 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002280
Shannon Nelsone9f65632016-01-04 10:33:04 -08002281 if (skb->ip_summed != CHECKSUM_PARTIAL)
2282 return 0;
2283
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002284 if (!skb_is_gso(skb))
2285 return 0;
2286
Francois Romieudd225bc2014-03-30 03:14:48 +00002287 err = skb_cow_head(skb, 0);
2288 if (err < 0)
2289 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002290
Alexander Duyckc7770192016-01-24 21:16:35 -08002291 ip.hdr = skb_network_header(skb);
2292 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002293
Alexander Duyckc7770192016-01-24 21:16:35 -08002294 /* initialize outer IP header fields */
2295 if (ip.v4->version == 4) {
2296 ip.v4->tot_len = 0;
2297 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002298 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002299 ip.v6->payload_len = 0;
2300 }
2301
Alexander Duyck577389a2016-04-02 00:06:56 -07002302 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002303 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002304 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002305 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07002306 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002307 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002308 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2309 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2310 l4.udp->len = 0;
2311
Alexander Duyck54532052016-01-24 21:17:29 -08002312 /* determine offset of outer transport header */
2313 l4_offset = l4.hdr - skb->data;
2314
2315 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002316 paylen = skb->len - l4_offset;
2317 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002318 }
2319
Alexander Duyckc7770192016-01-24 21:16:35 -08002320 /* reset pointers to inner headers */
2321 ip.hdr = skb_inner_network_header(skb);
2322 l4.hdr = skb_inner_transport_header(skb);
2323
2324 /* initialize inner IP header fields */
2325 if (ip.v4->version == 4) {
2326 ip.v4->tot_len = 0;
2327 ip.v4->check = 0;
2328 } else {
2329 ip.v6->payload_len = 0;
2330 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002331 }
2332
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002333 /* determine offset of inner transport header */
2334 l4_offset = l4.hdr - skb->data;
2335
2336 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002337 paylen = skb->len - l4_offset;
2338 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002339
2340 /* compute length of segmentation header */
2341 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002342
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002343 /* pull values out of skb_shinfo */
2344 gso_size = skb_shinfo(skb)->gso_size;
2345 gso_segs = skb_shinfo(skb)->gso_segs;
2346
2347 /* update GSO size and bytecount with header size */
2348 first->gso_segs = gso_segs;
2349 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2350
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002351 /* find the field values */
2352 cd_cmd = I40E_TX_CTX_DESC_TSO;
2353 cd_tso_len = skb->len - *hdr_len;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002354 cd_mss = gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002355 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2356 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2357 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002358 return 1;
2359}
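
/* Editor's sketch (worked example, not driver code): the bytecount update
 * in i40e_tso() accounts for headers replicated on every segment after the
 * first.  A 9066-byte skb (66 bytes of headers, 9000 of payload) with an
 * MSS of 1500 yields 6 segments and 9066 + (6 - 1) * 66 = 9396 wire bytes:
 */
static inline unsigned int example_tso_wire_bytes(unsigned int skb_len,
						  u8 hdr_len, u16 gso_segs)
{
	/* each segment after the first repeats hdr_len bytes of headers */
	return skb_len + (gso_segs - 1) * hdr_len;
}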
2360
2361/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002362 * i40e_tsyn - set up the tsyn context descriptor
2363 * @tx_ring: ptr to the ring to send
2364 * @skb: ptr to the skb we're sending
2365 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002366 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002367 *
2368 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2369 **/
2370static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2371 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2372{
2373 struct i40e_pf *pf;
2374
2375 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2376 return 0;
2377
2378 /* Tx timestamps cannot be sampled when doing TSO */
2379 if (tx_flags & I40E_TX_FLAGS_TSO)
2380 return 0;
2381
2382 /* only timestamp the outbound packet if the user has requested it and
2383 * we are not already transmitting a packet to be timestamped
2384 */
2385 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002386 if (!(pf->flags & I40E_FLAG_PTP))
2387 return 0;
2388
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002389 if (pf->ptp_tx &&
2390 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002391 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2392 pf->ptp_tx_skb = skb_get(skb);
2393 } else {
2394 return 0;
2395 }
2396
2397 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2398 I40E_TXD_CTX_QW1_CMD_SHIFT;
2399
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002400 return 1;
2401}
2402
2403/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002404 * i40e_tx_enable_csum - Enable Tx checksum offloads
2405 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002406 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002407 * @td_cmd: Tx descriptor command bits to set
2408 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06002409 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002410 * @cd_tunneling: ptr to context desc bits
2411 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08002412static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2413 u32 *td_cmd, u32 *td_offset,
2414 struct i40e_ring *tx_ring,
2415 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002416{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002417 union {
2418 struct iphdr *v4;
2419 struct ipv6hdr *v6;
2420 unsigned char *hdr;
2421 } ip;
2422 union {
2423 struct tcphdr *tcp;
2424 struct udphdr *udp;
2425 unsigned char *hdr;
2426 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002427 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002428 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002429 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002430 u8 l4_proto = 0;
2431
Alexander Duyck529f1f62016-01-24 21:17:10 -08002432 if (skb->ip_summed != CHECKSUM_PARTIAL)
2433 return 0;
2434
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002435 ip.hdr = skb_network_header(skb);
2436 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002437
Alexander Duyck475b4202016-01-24 21:17:01 -08002438 /* compute outer L2 header size */
2439 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2440
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002441 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002442 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08002443 /* define outer network header type */
2444 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002445 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2446 I40E_TX_CTX_EXT_IP_IPV4 :
2447 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2448
Alexander Duycka0064722016-01-24 21:16:48 -08002449 l4_proto = ip.v4->protocol;
2450 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002451 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002452
2453 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08002454 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002455 if (l4.hdr != exthdr)
2456 ipv6_skip_exthdr(skb, exthdr - skb->data,
2457 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08002458 }
2459
2460 /* define outer transport */
2461 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002462 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08002463 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002464 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002465 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002466 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08002467 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08002468 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002469 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07002470 case IPPROTO_IPIP:
2471 case IPPROTO_IPV6:
2472 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2473 l4.hdr = skb_inner_network_header(skb);
2474 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002475 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002476 if (*tx_flags & I40E_TX_FLAGS_TSO)
2477 return -1;
2478
2479 skb_checksum_help(skb);
2480 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002481 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002482
Alexander Duyck577389a2016-04-02 00:06:56 -07002483 /* compute outer L3 header size */
2484 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2485 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2486
2487 /* switch IP header pointer from outer to inner header */
2488 ip.hdr = skb_inner_network_header(skb);
2489
Alexander Duyck475b4202016-01-24 21:17:01 -08002490 /* compute tunnel header size */
2491 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2492 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2493
Alexander Duyck54532052016-01-24 21:17:29 -08002494 /* indicate if we need to offload outer UDP header */
2495 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002496 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08002497 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2498 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2499
Alexander Duyck475b4202016-01-24 21:17:01 -08002500 /* record tunnel offload values */
2501 *cd_tunneling |= tunnel;
2502
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002503 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002504 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08002505 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002506
Alexander Duycka0064722016-01-24 21:16:48 -08002507 /* reset type as we transition from outer to inner headers */
2508 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2509 if (ip.v4->version == 4)
2510 *tx_flags |= I40E_TX_FLAGS_IPV4;
2511 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002512 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002513 }

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already; the only time
		 * we need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

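	/* Illustrative note (not in the original source): IPLEN is also in
	 * 4-byte words, so a bare 20-byte IPv4 header yields 5 and the
	 * fixed 40-byte IPv6 header yields 10; any options or extension
	 * headers sitting between ip.hdr and l4.hdr grow the value
	 * accordingly.
	 */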
	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}
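
	/* Illustrative note (not in the original source): L4LEN is again in
	 * 4-byte words. TCP's doff field already counts 32-bit words (5 for
	 * a bare 20-byte header), while the fixed-size SCTP and UDP headers
	 * are converted by the >> 2 above (12 >> 2 = 3 and 8 >> 2 = 2).
	 */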

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}
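
/* Return-value convention of the checksum path above (summary comment,
 * not in the original source): 1 means the descriptor fields were
 * populated and hardware will insert the checksum, 0 means the skb was
 * handed to skb_checksum_help() and needs no offload, and -1 tells the
 * caller to drop the packet because TSO requires an offloadable L4
 * protocol.
 */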

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to ensure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
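
/* Note on the pattern above (summary comment, not in the original
 * source): the queue is stopped first, then the free-descriptor count
 * is re-read after smp_mb(). The barrier makes the stop visible before
 * the re-check, so a CPU freeing descriptors on the completion path
 * either sees the stopped queue and wakes it, or this re-check sees the
 * newly freed room; either way the queue cannot stall with space
 * available.
 */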

/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte, which is why we are limited to 6
	 * descriptors for a single transmit, as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}
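
/* Worked example (illustrative, not in the original source): with
 * gso_size = 2000 and seven 300-byte fragments, sum starts at
 * 1 - 2000 = -1999, frags 0 through 4 bring it up to -499, and adding
 * frag 5 (300) leaves sum = -199 < 0: a 2000-byte segment could span
 * more than 6 fragment descriptors plus the header and prior payload,
 * so the function returns true and the skb is linearized before mapping.
 */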

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 desc_count = 1;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
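
		/* Illustrative note (not in the original source): the
		 * expression -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the
		 * distance from dma to the next read-request boundary.
		 * Assuming a 4096-byte I40E_MAX_READ_REQ_SIZE, a mapping
		 * at dma = 0x10f00 adds 0x100 bytes, so every chunk after
		 * the first ends exactly on a boundary.
		 */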
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with EOP bit */
	td_cmd |= I40E_TX_DESC_CMD_EOP;

	/* We can OR these values together as they both are checked against
	 * 4 below and at this point desc_count will be used as a boolean value
	 * after this if/else block.
	 */
	desc_count |= ++tx_ring->packet_stride;

	/* Algorithm to optimize tail and RS bit setting:
	 * if queue is stopped
	 *	mark RS bit
	 *	reset packet counter
	 * else if xmit_more is supported and is true
	 *	advance packet counter to 4
	 *	reset desc_count to 0
	 *
	 * if desc_count >= 4
	 *	mark RS bit
	 *	reset packet counter
	 * if desc_count > 0
	 *	update tail
	 *
	 * Note: If there are fewer than 4 descriptors pending and
	 * interrupts were disabled, the service task will trigger a
	 * force WB.
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring))) {
		goto do_rs;
	} else if (skb->xmit_more) {
		/* set stride to arm on next packet and reset desc_count */
		tx_ring->packet_stride = WB_STRIDE;
		desc_count = 0;
	} else if (desc_count >= WB_STRIDE) {
do_rs:
		/* write last descriptor with RS bit set */
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}
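
	/* Trace of the stride logic above (illustrative, not in the
	 * original source; assumes WB_STRIDE is 4, as the comment above
	 * suggests): while a burst keeps xmit_more set, packet_stride is
	 * parked at WB_STRIDE and desc_count is cleared, so no RS bit or
	 * tail write happens. The packet that ends the burst increments
	 * packet_stride to 5, making desc_count >= WB_STRIDE, so it
	 * requests a writeback via RS and bumps the tail register once
	 * for the whole burst.
	 */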

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (desc_count) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our
		 * tail at a time; it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data; we'll need it later */
	prefetch(skb->data);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 * + 4 desc gap to avoid the cache line where head is,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}
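
	/* Budget example (illustrative, not in the original source): an
	 * skb with a small linear header and 17 page fragments, each
	 * fitting in one descriptor, needs count = 18 data descriptors,
	 * so the ring must have 18 + 4 + 1 = 23 free entries before we
	 * commit; otherwise the queue is stopped and the stack retries
	 * later.
	 */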
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002926
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002927 /* record the location of the first descriptor for this packet */
2928 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2929 first->skb = skb;
2930 first->bytecount = skb->len;
2931 first->gso_segs = 1;
2932
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002933 /* prepare the xmit flags */
2934 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2935 goto out_drop;
2936
2937 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04002938 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002939
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002940 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002941 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002942 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002943 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002944 tx_flags |= I40E_TX_FLAGS_IPV6;
2945
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002946 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002947
2948 if (tso < 0)
2949 goto out_drop;
2950 else if (tso)
2951 tx_flags |= I40E_TX_FLAGS_TSO;
2952
Alexander Duyck3bc67972016-02-17 11:02:56 -08002953 /* Always offload the checksum, since it's in the data descriptor */
2954 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2955 tx_ring, &cd_tunneling);
2956 if (tso < 0)
2957 goto out_drop;
2958
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002959 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2960
2961 if (tsyn)
2962 tx_flags |= I40E_TX_FLAGS_TSYN;
2963
Jakub Kicinski259afec2014-03-15 14:55:37 +00002964 skb_tx_timestamp(skb);
2965
Alexander Duyckb1941302013-09-28 06:00:32 +00002966 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002967 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2968
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002969 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2970 cd_tunneling, cd_l2tag2);
2971
2972 /* Add Flow Director ATR if it's enabled.
2973 *
2974 * NOTE: this must always be directly before the data descriptor.
2975 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002976 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002977
2978 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2979 td_cmd, td_offset);
2980
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002981 return NETDEV_TX_OK;
2982
2983out_drop:
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002984 dev_kfree_skb_any(first->skb);
2985 first->skb = NULL;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002986 return NETDEV_TX_OK;
2987}
2988
2989/**
2990 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2991 * @skb: send buffer
2992 * @netdev: network interface device structure
2993 *
2994 * Returns NETDEV_TX_OK if sent, else an error code
2995 **/
2996netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2997{
2998 struct i40e_netdev_priv *np = netdev_priv(netdev);
2999 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e152013-09-28 06:00:58 +00003000 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003001
3002 /* hardware can't handle really short frames, hardware padding works
3003 * beyond this point
3004 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08003005 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3006 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003007
3008 return i40e_xmit_frame_ring(skb, tx_ring);
3009}