/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
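
/* Illustrative use of build_ctob() (not part of the driver flow): a final
 * data descriptor for a 1514-byte buffer with no offloads or VLAN tag could
 * be encoded as
 *	build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS, 0, 1514, 0);
 * i40e_program_fdir_filter() below uses the same helper for its dummy
 * data descriptor.
 */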

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
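
/* Note: i40e_fdir() consumes one ring entry for the filter programming
 * descriptor and advances next_to_use itself; the caller
 * (i40e_program_fdir_filter() below) then adds the dummy data descriptor
 * that carries the raw packet.
 */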

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
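	/* The bytes above are a minimal 42-byte UDP/IPv4 frame: 12 zeroed MAC
	 * address bytes, ethertype 0x0800 (IPv4), an IPv4 header with
	 * version/IHL 0x45, total length 0x001c (28), DF set, TTL 0x40 and
	 * protocol 0x11 (UDP), followed by a zeroed UDP header.  Addresses
	 * and ports are patched in below before the filter is programmed.
	 */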

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter data to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			goto unsupported_flow;
		}
		break;
	default:
unsupported_flow:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
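
/* i40e_enable_wb_on_itr() above only requests a descriptor write-back at the
 * next ITR expiry without raising an interrupt (WB_ON_ITR), whereas
 * i40e_force_wb() below triggers a software interrupt (SWINT_TRIG) so the
 * write-back happens immediately and the vector gets serviced again.
 */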

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
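
/* Worked example of the table above: a vector sitting in I40E_LOW_LATENCY
 * whose bytes_per_int works out to more than 20 (i.e. >20 MB/s) is moved to
 * I40E_BULK_LATENCY and gets I40E_ITR_18K; if the rate later drops to
 * 10 MB/s or less it falls back to I40E_LOWEST_LATENCY and I40E_ITR_50K.
 */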

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_bi->page, 0);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
1251
1252/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001253 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1254 * @vsi: the VSI we care about
1255 * @skb: skb currently being received and modified
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001256 * @rx_desc: the receive descriptor
1257 *
1258 * skb->protocol must be set before this function is called
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001259 **/
1260static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1261 struct sk_buff *skb,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001262 union i40e_rx_desc *rx_desc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001263{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001264 struct i40e_rx_ptype_decoded decoded;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001265 u32 rx_error, rx_status;
Alexander Duyck858296c82016-06-14 15:45:42 -07001266 bool ipv4, ipv6;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001267 u8 ptype;
1268 u64 qword;
1269
1270 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1271 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1272 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1273 I40E_RXD_QW1_ERROR_SHIFT;
1274 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1275 I40E_RXD_QW1_STATUS_SHIFT;
1276 decoded = decode_rx_desc_ptype(ptype);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001277
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001278 skb->ip_summed = CHECKSUM_NONE;
1279
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001280 skb_checksum_none_assert(skb);
1281
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001282 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001283 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001284 return;
1285
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001286 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001287 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001288 return;
1289
1290 /* both known and outer_ip must be set for the below code to work */
1291 if (!(decoded.known && decoded.outer_ip))
1292 return;
1293
Alexander Duyckfad57332016-01-24 21:17:22 -08001294 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1295 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1296 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1297 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001298
1299 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001300 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1301 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001302 goto checksum_fail;
1303
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001304 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001305 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001306 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001307 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001308 return;
1309
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001310 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001311 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001312 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001313
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001314 /* handle packets that were not able to be checksummed due
1315 * to arrival speed, in this case the stack can compute
1316 * the csum.
1317 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001318 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001319 return;
1320
Alexander Duyck858296c82016-06-14 15:45:42 -07001321 /* If there is an outer header present that might contain a checksum
1322 * we need to bump the checksum level by 1 to reflect the fact that
1323 * we are indicating we validated the inner checksum.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001324 */
Alexander Duyck858296c82016-06-14 15:45:42 -07001325 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1326 skb->csum_level = 1;
Alexander Duyckfad57332016-01-24 21:17:22 -08001327
Alexander Duyck858296c82016-06-14 15:45:42 -07001328 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1329 switch (decoded.inner_prot) {
1330 case I40E_RX_PTYPE_INNER_PROT_TCP:
1331 case I40E_RX_PTYPE_INNER_PROT_UDP:
1332 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1333 skb->ip_summed = CHECKSUM_UNNECESSARY;
1334		/* fall through */
1335 default:
1336 break;
1337 }
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001338
1339 return;
1340
1341checksum_fail:
1342 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001343}
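
/* Illustrative outcomes, assuming a plain (non-tunneled) frame: an
 * IPv4/TCP packet with the L3L4P status bit set and none of the
 * IPE/EIPE/L4E error bits leaves here with CHECKSUM_UNNECESSARY, so the
 * stack skips software verification; the same packet with L4E set bumps
 * hw_csum_rx_error and is handed up with CHECKSUM_NONE so the stack
 * verifies the checksum itself. For tunneled packets the hardware
 * verdict applies to the inner L4 checksum, which is why csum_level is
 * raised to 1 above.
 */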
1344
1345/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001346 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001347 * @ptype: the ptype value from the descriptor
1348 *
1349 * Returns a hash type to be used by skb_set_hash
1350 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001351static inline int i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001352{
1353 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1354
1355 if (!decoded.known)
1356 return PKT_HASH_TYPE_NONE;
1357
1358 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1359 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1360 return PKT_HASH_TYPE_L4;
1361 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1362 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1363 return PKT_HASH_TYPE_L3;
1364 else
1365 return PKT_HASH_TYPE_L2;
1366}
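
/* Example mapping (illustrative only): a known IPv4/TCP ptype decodes
 * with payload_layer == PAY4 and is reported as PKT_HASH_TYPE_L4, so the
 * stack knows the RSS hash covers the full 4-tuple; a fragmented IP
 * packet typically decodes as PAY3 and maps to PKT_HASH_TYPE_L3. An
 * unknown ptype returns PKT_HASH_TYPE_NONE, and known non-IP types fall
 * back to PKT_HASH_TYPE_L2.
 */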
1367
1368/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001369 * i40e_rx_hash - set the hash value in the skb
1370 * @ring: descriptor ring
1371 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
1372 **/
1373static inline void i40e_rx_hash(struct i40e_ring *ring,
1374 union i40e_rx_desc *rx_desc,
1375 struct sk_buff *skb,
1376 u8 rx_ptype)
1377{
1378 u32 hash;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001379 const __le64 rss_mask =
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001380 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1381 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1382
Mitch Williamsa876c3b2016-05-03 15:13:18 -07001383 if (!(ring->netdev->features & NETIF_F_RXHASH))
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001384 return;
1385
1386 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1387 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1388 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1389 }
1390}
1391
1392/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001393 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1394 * @rx_ring: rx descriptor ring packet is being transacted on
1395 * @rx_desc: pointer to the EOP Rx descriptor
1396 * @skb: pointer to current skb being populated
1397 * @rx_ptype: the packet type decoded by hardware
Mitch Williamsa132af22015-01-24 09:58:35 +00001398 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001399 * This function checks the ring, descriptor, and packet information in
1400 * order to populate the hash, checksum, VLAN, protocol, and
1401 * other fields within the skb.
Mitch Williamsa132af22015-01-24 09:58:35 +00001402 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001403static inline
1404void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1405 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1406 u8 rx_ptype)
1407{
1408 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1409 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1410 I40E_RXD_QW1_STATUS_SHIFT;
Jacob Keller144ed172016-10-05 09:30:42 -07001411 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1412 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001413 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1414
Jacob Keller12490502016-10-05 09:30:44 -07001415 if (unlikely(tsynvalid))
Jacob Keller144ed172016-10-05 09:30:42 -07001416 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001417
1418 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1419
1420 /* modifies the skb - consumes the enet header */
1421 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1422
1423 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1424
1425 skb_record_rx_queue(skb, rx_ring->queue_index);
1426}
1427
1428/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001429 * i40e_cleanup_headers - Correct empty headers
1430 * @rx_ring: rx descriptor ring packet is being transacted on
1431 * @skb: pointer to current skb being fixed
1432 *
1433 * Also address the case where we are pulling data in on pages only
1434 * and as such no data is present in the skb header.
1435 *
1436 * In addition if skb is not at least 60 bytes we need to pad it so that
1437 * it is large enough to qualify as a valid Ethernet frame.
1438 *
1439 * Returns true if an error was encountered and skb was freed.
1440 **/
1441static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
1442{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001443 /* if eth_skb_pad returns an error the skb was freed */
1444 if (eth_skb_pad(skb))
1445 return true;
1446
1447 return false;
1448}
1449
1450/**
1451 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1452 * @rx_ring: rx descriptor ring to store buffers on
1453 * @old_buff: donor buffer to have page reused
1454 *
1455 * Synchronizes page for reuse by the adapter
1456 **/
1457static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1458 struct i40e_rx_buffer *old_buff)
1459{
1460 struct i40e_rx_buffer *new_buff;
1461 u16 nta = rx_ring->next_to_alloc;
1462
1463 new_buff = &rx_ring->rx_bi[nta];
1464
1465 /* update, and store next to alloc */
1466 nta++;
1467 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1468
1469 /* transfer page from old buffer to new buffer */
1470 *new_buff = *old_buff;
1471}
1472
1473/**
Scott Peterson9b37c932017-02-09 23:43:30 -08001474 * i40e_page_is_reusable - check if any reuse is possible
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001475 * @page: page struct to check
Scott Peterson9b37c932017-02-09 23:43:30 -08001476 *
1477 * A page is not reusable if it was allocated under low memory
1478 * conditions, or it's not in the same NUMA node as this CPU.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001479 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001480static inline bool i40e_page_is_reusable(struct page *page)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001481{
Scott Peterson9b37c932017-02-09 23:43:30 -08001482 return (page_to_nid(page) == numa_mem_id()) &&
1483 !page_is_pfmemalloc(page);
1484}
1485
1486/**
1487 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1488 * the adapter for another receive
1489 *
1490 * @rx_buffer: buffer containing the page
1491 * @page: page address from rx_buffer
1492 * @truesize: actual size of the buffer in this page
1493 *
1494 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1495 * an unused region in the page.
1496 *
1497 * For small pages, @truesize will be a constant value, half the size
1498 * of the memory at page. We'll attempt to alternate between high and
1499 * low halves of the page, with one half ready for use by the hardware
1500 * and the other half being consumed by the stack. We use the page
1501 * ref count to determine whether the stack has finished consuming the
1502 * portion of this page that was passed up with a previous packet. If
1503 * the page ref count is >1, we'll assume the "other" half page is
1504 * still busy, and this page cannot be reused.
1505 *
1506 * For larger pages, @truesize will be the actual space used by the
1507 * received packet (adjusted upward to an even multiple of the cache
1508 * line size). This will advance through the page by the amount
1509 * actually consumed by the received packets while there is still
1510 * space for a buffer. Each region of larger pages will be used at
1511 * most once, after which the page will not be reused.
1512 *
1513 * In either case, if the page is reusable its refcount is increased.
1514 **/
1515static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1516 struct page *page,
1517 const unsigned int truesize)
1518{
1519#if (PAGE_SIZE >= 8192)
1520 unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
1521#endif
1522
1523 /* Is any reuse possible? */
1524 if (unlikely(!i40e_page_is_reusable(page)))
1525 return false;
1526
1527#if (PAGE_SIZE < 8192)
1528	/* if we are the only owner of the page we can reuse it */
1529 if (unlikely(page_count(page) != 1))
1530 return false;
1531
1532 /* flip page offset to other buffer */
1533 rx_buffer->page_offset ^= truesize;
1534#else
1535 /* move offset up to the next cache line */
1536 rx_buffer->page_offset += truesize;
1537
1538 if (rx_buffer->page_offset > last_offset)
1539 return false;
1540#endif
1541
1542 /* Inc ref count on page before passing it up to the stack */
1543 get_page(page);
1544
1545 return true;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001546}
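
/* Worked example (illustrative only): with 4K pages the half-page
 * buffers are I40E_RXBUFFER_2048 bytes, so the XOR above simply
 * alternates page_offset between 0 and 2048 (0 ^ 2048 == 2048,
 * 2048 ^ 2048 == 0); reuse is refused while page_count() != 1, i.e.
 * while the stack still holds a reference to the half that was handed
 * up with an earlier packet. The get_page() call accounts for the
 * reference that is about to be given to the stack.
 */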
1547
1548/**
1549 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1550 * @rx_ring: rx descriptor ring to transact packets on
1551 * @rx_buffer: buffer containing page to add
Scott Peterson7987dcd2017-02-09 23:37:28 -08001552 * @size: packet length from rx_desc
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001553 * @skb: sk_buff to place the data into
1554 *
1555 * This function will add the data contained in rx_buffer->page to the skb.
1556 * This is done either through a direct copy if the data in the buffer is
1557 * less than the skb header size, otherwise it will just attach the page as
1558 * a frag to the skb.
1559 *
1560 * The function will then update the page offset if necessary and return
1561 * true if the buffer can be reused by the adapter.
1562 **/
1563static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
1564 struct i40e_rx_buffer *rx_buffer,
Scott Peterson7987dcd2017-02-09 23:37:28 -08001565 unsigned int size,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001566 struct sk_buff *skb)
1567{
1568 struct page *page = rx_buffer->page;
Scott Peterson9b37c932017-02-09 23:43:30 -08001569 unsigned char *va = page_address(page) + rx_buffer->page_offset;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001570#if (PAGE_SIZE < 8192)
1571 unsigned int truesize = I40E_RXBUFFER_2048;
1572#else
1573 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001574#endif
Scott Peterson9b37c932017-02-09 23:43:30 -08001575 unsigned int pull_len;
1576
1577 if (unlikely(skb_is_nonlinear(skb)))
1578 goto add_tail_frag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001579
1580 /* will the data fit in the skb we allocated? if so, just
1581 * copy it as it is pretty small anyway
1582 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001583 if (size <= I40E_RX_HDR_SIZE) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001584 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1585
Scott Peterson9b37c932017-02-09 23:43:30 -08001586 /* page is reusable, we can reuse buffer as-is */
1587 if (likely(i40e_page_is_reusable(page)))
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001588 return true;
1589
1590 /* this page cannot be reused so discard it */
1591 __free_pages(page, 0);
1592 return false;
1593 }
1594
Scott Peterson9b37c932017-02-09 23:43:30 -08001595	/* we need the header to contain the greater of either ETH_HLEN
1596	 * or, when skb->len is less than 60, the 60 bytes required by
1597	 * skb_pad.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001598 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001599 pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001600
Scott Peterson9b37c932017-02-09 23:43:30 -08001601 /* align pull length to size of long to optimize
1602 * memcpy performance
1603 */
1604 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
1605
1606 /* update all of the pointers */
1607 va += pull_len;
1608 size -= pull_len;
1609
1610add_tail_frag:
1611 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1612 (unsigned long)va & ~PAGE_MASK, size, truesize);
1613
1614 return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001615}
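
/* Illustrative sketch of the two paths above, assuming the usual
 * 256 byte I40E_RX_HDR_SIZE: a 128 byte ARP reply is memcpy'd into the
 * skb linear area and the page can be recycled immediately, while a
 * 1200 byte TCP segment only has its ~66 bytes of Ethernet/IP/TCP
 * headers pulled in by eth_get_headlen() and the remaining payload is
 * attached as a page fragment, avoiding a large per-packet copy.
 */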
1616
1617/**
1618 * i40e_fetch_rx_buffer - Allocate skb and populate it
1619 * @rx_ring: rx descriptor ring to transact packets on
1620 * @rx_desc: descriptor containing info written by hardware
 * @skb: skb carried over from the previous receive clean, or NULL
1621 *
1622 * This function allocates an skb on the fly, and populates it with the page
1623 * data from the current receive descriptor, taking care to set up the skb
1624 * correctly, as well as handling calling the page recycle function if
1625 * necessary.
1626 */
1627static inline
1628struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
Scott Petersone72e5652017-02-09 23:40:25 -08001629 union i40e_rx_desc *rx_desc,
1630 struct sk_buff *skb)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001631{
Scott Peterson7987dcd2017-02-09 23:37:28 -08001632 u64 local_status_error_len =
1633 le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1634 unsigned int size =
1635 (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1636 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001637 struct i40e_rx_buffer *rx_buffer;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001638 struct page *page;
1639
1640 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1641 page = rx_buffer->page;
1642 prefetchw(page);
1643
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001644 if (likely(!skb)) {
1645 void *page_addr = page_address(page) + rx_buffer->page_offset;
1646
1647 /* prefetch first cache line of first page */
1648 prefetch(page_addr);
1649#if L1_CACHE_BYTES < 128
1650 prefetch(page_addr + L1_CACHE_BYTES);
1651#endif
1652
1653 /* allocate a skb to store the frags */
1654 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1655 I40E_RX_HDR_SIZE,
1656 GFP_ATOMIC | __GFP_NOWARN);
1657 if (unlikely(!skb)) {
1658 rx_ring->rx_stats.alloc_buff_failed++;
1659 return NULL;
1660 }
1661
1662 /* we will be copying header into skb->data in
1663 * pskb_may_pull so it is in our interest to prefetch
1664 * it now to avoid a possible cache miss
1665 */
1666 prefetchw(skb->data);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001667 }
1668
1669 /* we are reusing so sync this buffer for CPU use */
1670 dma_sync_single_range_for_cpu(rx_ring->dev,
1671 rx_buffer->dma,
1672 rx_buffer->page_offset,
Scott Peterson7987dcd2017-02-09 23:37:28 -08001673 size,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001674 DMA_FROM_DEVICE);
1675
1676 /* pull page into skb */
Scott Peterson7987dcd2017-02-09 23:37:28 -08001677 if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001678 /* hand second half of page back to the ring */
1679 i40e_reuse_rx_page(rx_ring, rx_buffer);
1680 rx_ring->rx_stats.page_reuse_count++;
1681 } else {
1682 /* we are not reusing the buffer so unmap it */
1683 dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1684 DMA_FROM_DEVICE);
1685 }
1686
1687 /* clear contents of buffer_info */
1688 rx_buffer->page = NULL;
1689
1690 return skb;
1691}
1692
1693/**
1694 * i40e_is_non_eop - process handling of non-EOP buffers
1695 * @rx_ring: Rx ring being processed
1696 * @rx_desc: Rx descriptor for current buffer
1697 * @skb: Current socket buffer containing buffer in progress
1698 *
1699 * This function updates next to clean. If the buffer is an EOP buffer
1700 * this function exits returning false, otherwise it will place the
1701 * sk_buff in the next buffer to be chained and return true indicating
1702 * that this is in fact a non-EOP buffer.
1703 **/
1704static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1705 union i40e_rx_desc *rx_desc,
1706 struct sk_buff *skb)
1707{
1708 u32 ntc = rx_ring->next_to_clean + 1;
1709
1710 /* fetch, update, and store next to clean */
1711 ntc = (ntc < rx_ring->count) ? ntc : 0;
1712 rx_ring->next_to_clean = ntc;
1713
1714 prefetch(I40E_RX_DESC(rx_ring, ntc));
1715
1716#define staterrlen rx_desc->wb.qword1.status_error_len
1717 if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
1718 i40e_clean_programming_status(rx_ring, rx_desc);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001719 return true;
1720 }
1721 /* if we are the last buffer then there is nothing else to do */
1722#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1723 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1724 return false;
1725
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001726 rx_ring->rx_stats.non_eop_descs++;
1727
1728 return true;
1729}
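
/* Worked example (illustrative only): a 9000 byte jumbo frame landing
 * in 2048 byte buffers consumes five descriptors; the first four lack
 * the EOP bit, so this returns true and i40e_clean_rx_irq() keeps
 * adding page fragments to the same skb, while the fifth (EOP)
 * descriptor returns false and the completed skb continues on to
 * i40e_process_skb_fields() and i40e_receive_skb().
 */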
1730
1731/**
1732 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1733 * @rx_ring: rx descriptor ring to transact packets on
1734 * @budget: Total limit on number of packets to process
1735 *
1736 * This function provides a "bounce buffer" approach to Rx interrupt
1737 * processing. The advantage to this is that on systems that have
1738 * expensive overhead for IOMMU access this provides a means of avoiding
1739 * it by maintaining the mapping of the page to the system.
1740 *
1741 * Returns amount of work completed
1742 **/
1743static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
Mitch Williamsa132af22015-01-24 09:58:35 +00001744{
1745 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Scott Petersone72e5652017-02-09 23:40:25 -08001746 struct sk_buff *skb = rx_ring->skb;
Mitch Williamsa132af22015-01-24 09:58:35 +00001747 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001748 bool failure = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001749
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001750 while (likely(total_rx_packets < budget)) {
1751 union i40e_rx_desc *rx_desc;
Mitch Williamsa132af22015-01-24 09:58:35 +00001752 u16 vlan_tag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001753 u8 rx_ptype;
1754 u64 qword;
1755
Mitch Williamsa132af22015-01-24 09:58:35 +00001756 /* return some buffers to hardware, one at a time is too slow */
1757 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001758 failure = failure ||
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001759 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001760 cleaned_count = 0;
1761 }
1762
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001763 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1764
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001765 /* status_error_len will always be zero for unused descriptors
1766 * because it's cleared in cleanup, and overlaps with hdr_addr
1767	 * which is always zero because packet split isn't used. If the
1768 * hardware wrote DD then it will be non-zero
1769 */
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001770 if (!i40e_test_staterr(rx_desc,
1771 BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001772 break;
1773
Mitch Williamsa132af22015-01-24 09:58:35 +00001774 /* This memory barrier is needed to keep us from reading
1775 * any other fields out of the rx_desc until we know the
1776 * DD bit is set.
1777 */
Alexander Duyck67317162015-04-08 18:49:43 -07001778 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001779
Scott Petersone72e5652017-02-09 23:40:25 -08001780 skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001781 if (!skb)
1782 break;
Mitch Williamsa132af22015-01-24 09:58:35 +00001783
Mitch Williamsa132af22015-01-24 09:58:35 +00001784 cleaned_count++;
1785
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001786 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
Mitch Williamsa132af22015-01-24 09:58:35 +00001787 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001788
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001789 /* ERR_MASK will only have valid bits if EOP set, and
1790 * what we are doing here is actually checking
1791 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1792 * the error field
1793 */
1794 if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001795 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001796 continue;
1797 }
1798
Scott Petersone72e5652017-02-09 23:40:25 -08001799 if (i40e_cleanup_headers(rx_ring, skb)) {
1800 skb = NULL;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001801 continue;
Scott Petersone72e5652017-02-09 23:40:25 -08001802 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001803
1804 /* probably a little skewed due to removing CRC */
1805 total_rx_bytes += skb->len;
Mitch Williamsa132af22015-01-24 09:58:35 +00001806
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001807 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1808 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1809 I40E_RXD_QW1_PTYPE_SHIFT;
1810
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001811 /* populate checksum, VLAN, and protocol */
1812 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00001813
Mitch Williamsa132af22015-01-24 09:58:35 +00001814#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001815 if (unlikely(
1816 i40e_rx_is_fcoe(rx_ptype) &&
1817 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001818 dev_kfree_skb_any(skb);
1819 continue;
1820 }
1821#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001822
1823 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1824 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1825
Mitch Williamsa132af22015-01-24 09:58:35 +00001826 i40e_receive_skb(rx_ring, skb, vlan_tag);
Scott Petersone72e5652017-02-09 23:40:25 -08001827 skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001828
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001829 /* update budget accounting */
1830 total_rx_packets++;
1831 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001832
Scott Petersone72e5652017-02-09 23:40:25 -08001833 rx_ring->skb = skb;
1834
Mitch Williamsa132af22015-01-24 09:58:35 +00001835 u64_stats_update_begin(&rx_ring->syncp);
1836 rx_ring->stats.packets += total_rx_packets;
1837 rx_ring->stats.bytes += total_rx_bytes;
1838 u64_stats_update_end(&rx_ring->syncp);
1839 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1840 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1841
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001842 /* guarantee a trip back through this routine if there was a failure */
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001843 return failure ? budget : total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001844}
1845
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001846static u32 i40e_buildreg_itr(const int type, const u16 itr)
1847{
1848 u32 val;
1849
1850 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08001851 /* Don't clear PBA because that can cause lost interrupts that
1852 * came in while we were cleaning/polling
1853 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001854 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1855 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1856
1857 return val;
1858}
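
/* Usage sketch (illustrative only): the value built here is meant to be
 * written to the per-vector dynamic control register, e.g.
 *
 *	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1),
 *	     i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr));
 *
 * which re-enables the interrupt and, assuming the usual 2 usec register
 * granularity, programs an Rx interval of rx.itr * 2 usec for that
 * vector. i40e_update_enable_itr() below performs exactly this kind of
 * write.
 */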
1859
1860/* a small macro to shorten up some long lines */
1861#define INTREG I40E_PFINT_DYN_CTLN
Jacob Keller65e87c02016-09-12 14:18:44 -07001862static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
1863{
1864 return !!(vsi->rx_rings[idx]->rx_itr_setting);
1865}
1866
1867static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
1868{
1869 return !!(vsi->tx_rings[idx]->tx_itr_setting);
1870}
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001871
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001872/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001873 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1874 * @vsi: the VSI we care about
1875 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1876 *
1877 **/
1878static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1879 struct i40e_q_vector *q_vector)
1880{
1881 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001882 bool rx = false, tx = false;
1883 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001884 int vector;
Kan Lianga75e8002016-02-19 09:24:04 -05001885 int idx = q_vector->v_idx;
Jacob Keller65e87c02016-09-12 14:18:44 -07001886 int rx_itr_setting, tx_itr_setting;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001887
1888 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001889
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001890 /* avoid dynamic calculation if in countdown mode OR if
1891 * all dynamic is disabled
1892 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001893 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1894
Jacob Keller65e87c02016-09-12 14:18:44 -07001895 rx_itr_setting = get_rx_itr_enabled(vsi, idx);
1896 tx_itr_setting = get_tx_itr_enabled(vsi, idx);
1897
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001898 if (q_vector->itr_countdown > 0 ||
Jacob Keller65e87c02016-09-12 14:18:44 -07001899 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
1900 !ITR_IS_DYNAMIC(tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001901 goto enable_int;
1902 }
1903
Jacob Keller65e87c02016-09-12 14:18:44 -07001904	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001905 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1906 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001907 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001908
Jacob Keller65e87c02016-09-12 14:18:44 -07001909 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001910 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1911 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001912 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001913
1914 if (rx || tx) {
1915 /* get the higher of the two ITR adjustments and
1916 * use the same value for both ITR registers
1917 * when in adaptive mode (Rx and/or Tx)
1918 */
1919 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1920
1921 q_vector->tx.itr = q_vector->rx.itr = itr;
1922 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1923 tx = true;
1924 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1925 rx = true;
1926 }
1927
1928 /* only need to enable the interrupt once, but need
1929 * to possibly update both ITR values
1930 */
1931 if (rx) {
1932 /* set the INTENA_MSK_MASK so that this first write
1933 * won't actually enable the interrupt, instead just
1934 * updating the ITR (it's bit 31 PF and VF)
1935 */
1936 rxval |= BIT(31);
1937 /* don't check _DOWN because interrupt isn't being enabled */
1938 wr32(hw, INTREG(vector - 1), rxval);
1939 }
1940
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001941enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001942 if (!test_bit(__I40E_DOWN, &vsi->state))
1943 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001944
1945 if (q_vector->itr_countdown)
1946 q_vector->itr_countdown--;
1947 else
1948 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001949}
1950
1951/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001952 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1953 * @napi: napi struct with our devices info in it
1954 * @budget: amount of work driver is allowed to do this pass, in packets
1955 *
1956 * This function will clean all queues associated with a q_vector.
1957 *
1958 * Returns the amount of work done
1959 **/
1960int i40e_napi_poll(struct napi_struct *napi, int budget)
1961{
1962 struct i40e_q_vector *q_vector =
1963 container_of(napi, struct i40e_q_vector, napi);
1964 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001965 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001966 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001967 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001968 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001969 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001970
1971 if (test_bit(__I40E_DOWN, &vsi->state)) {
1972 napi_complete(napi);
1973 return 0;
1974 }
1975
Kiran Patil9c6c1252015-11-06 15:26:02 -08001976 /* Clear hung_detected bit */
1977 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001978 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001979 * budget and be more aggressive about cleaning up the Tx descriptors.
1980 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001981 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08001982 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001983 clean_complete = false;
1984 continue;
1985 }
1986 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001987 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001988 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001989
Alexander Duyckc67cace2015-09-24 09:04:26 -07001990 /* Handle case where we are called by netpoll with a budget of 0 */
1991 if (budget <= 0)
1992 goto tx_only;
1993
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001994 /* We attempt to distribute budget to each Rx queue fairly, but don't
1995 * allow the budget to go below 1 because that would exit polling early.
1996 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001997 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001998
Mitch Williamsa132af22015-01-24 09:58:35 +00001999 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002000 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002001
2002 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08002003 /* if we clean as many as budgeted, we must not be done */
2004 if (cleaned >= budget_per_ring)
2005 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00002006 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002007
2008 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002009 if (!clean_complete) {
Alan Brady96db7762016-09-14 16:24:38 -07002010 const cpumask_t *aff_mask = &q_vector->affinity_mask;
2011 int cpu_id = smp_processor_id();
2012
2013 /* It is possible that the interrupt affinity has changed but,
2014 * if the cpu is pegged at 100%, polling will never exit while
2015 * traffic continues and the interrupt will be stuck on this
2016 * cpu. We check to make sure affinity is correct before we
2017 * continue to poll, otherwise we must stop polling so the
2018 * interrupt can move to the correct cpu.
2019 */
2020 if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
2021 !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07002022tx_only:
Alan Brady96db7762016-09-14 16:24:38 -07002023 if (arm_wb) {
2024 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2025 i40e_enable_wb_on_itr(vsi, q_vector);
2026 }
2027 return budget;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002028 }
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002029 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002030
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002031 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2032 q_vector->arm_wb_state = false;
2033
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002034 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002035 napi_complete_done(napi, work_done);
Alan Brady96db7762016-09-14 16:24:38 -07002036
2037 /* If we're prematurely stopping polling to fix the interrupt
2038 * affinity we want to make sure polling starts back up so we
2039 * issue a call to i40e_force_wb which triggers a SW interrupt.
2040 */
2041 if (!clean_complete)
2042 i40e_force_wb(vsi, q_vector);
2043 else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002044 i40e_irq_dynamic_enable_icr0(vsi->back, false);
Alan Brady96db7762016-09-14 16:24:38 -07002045 else
2046 i40e_update_enable_itr(vsi, q_vector);
2047
Alexander Duyck6beb84a2016-11-08 13:05:16 -08002048 return min(work_done, budget - 1);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002049}
2050
2051/**
2052 * i40e_atr - Add a Flow Director ATR filter
2053 * @tx_ring: ring to add programming descriptor to
2054 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002055 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002056 **/
2057static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002058 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002059{
2060 struct i40e_filter_program_desc *fdir_desc;
2061 struct i40e_pf *pf = tx_ring->vsi->back;
2062 union {
2063 unsigned char *network;
2064 struct iphdr *ipv4;
2065 struct ipv6hdr *ipv6;
2066 } hdr;
2067 struct tcphdr *th;
2068 unsigned int hlen;
2069 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002070 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002071 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002072
2073 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002074 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002075 return;
2076
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002077 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2078 return;
2079
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002080 /* if sampling is disabled do nothing */
2081 if (!tx_ring->atr_sample_rate)
2082 return;
2083
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002084 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002085 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002086 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002087
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002088 /* snag network header to get L4 type and address */
2089 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2090 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002091
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002092 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002093 * tx_enable_csum function if encap is enabled.
2094 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002095 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2096 /* access ihl as u8 to avoid unaligned access on ia64 */
2097 hlen = (hdr.network[0] & 0x0F) << 2;
2098 l4_proto = hdr.ipv4->protocol;
2099 } else {
2100 hlen = hdr.network - skb->data;
2101 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2102 hlen -= hdr.network - skb->data;
2103 }
2104
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002105 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002106 return;
2107
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002108 th = (struct tcphdr *)(hdr.network + hlen);
2109
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002110 /* Due to lack of space, no more new filters can be programmed */
2111 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2112 return;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002113 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2114 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002115 /* HW ATR eviction will take care of removing filters on FIN
2116 * and RST packets.
2117 */
2118 if (th->fin || th->rst)
2119 return;
2120 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002121
2122 tx_ring->atr_count++;
2123
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002124 /* sample on all syn/fin/rst packets or once every atr sample rate */
2125 if (!th->fin &&
2126 !th->syn &&
2127 !th->rst &&
2128 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002129 return;
2130
2131 tx_ring->atr_count = 0;
2132
2133 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002134 i = tx_ring->next_to_use;
2135 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2136
2137 i++;
2138 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002139
2140 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2141 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002142 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002143 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2144 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2145 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2146 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2147
2148 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2149
2150 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2151
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002152 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002153 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2154 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2155 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2156 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2157
2158 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2159 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2160
2161 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2162 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2163
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002164 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002165 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002166 dtype_cmd |=
2167 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2168 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2169 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2170 else
2171 dtype_cmd |=
2172 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2173 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2174 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002175
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002176 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2177 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002178 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2179
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002180 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002181 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002182 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002183 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002184}
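
/* Behaviour sketch (illustrative only): with atr_sample_rate set to,
 * say, 20, a long-lived TCP flow gets a filter programmed when its SYN
 * is transmitted and refreshed every 20th transmitted packet; the
 * filter steers the flow's receive traffic to the queue index matching
 * this Tx queue (DEST_DIRECT_PACKET_QINDEX with tx_ring->queue_index),
 * keeping Rx and Tx processing for the flow on the same queue pair.
 * FIN/RST transmissions remove the filter again unless the hardware
 * eviction capability is handling that.
 */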
2185
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002186/**
2187 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2188 * @skb: send buffer
2189 * @tx_ring: ring to send buffer on
2190 * @flags: the tx flags to be set
2191 *
2192 * Checks the skb and set up correspondingly several generic transmit flags
2193 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2194 *
2195 * Returns an error code to indicate the frame should be dropped upon error,
2196 * otherwise returns 0 to indicate the flags have been set properly.
2197 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002198#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002199inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002200 struct i40e_ring *tx_ring,
2201 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002202#else
2203static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2204 struct i40e_ring *tx_ring,
2205 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002206#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002207{
2208 __be16 protocol = skb->protocol;
2209 u32 tx_flags = 0;
2210
Greg Rose31eaacc2015-03-31 00:45:03 -07002211 if (protocol == htons(ETH_P_8021Q) &&
2212 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2213 /* When HW VLAN acceleration is turned off by the user the
2214 * stack sets the protocol to 8021q so that the driver
2215 * can take any steps required to support the SW only
2216 * VLAN handling. In our case the driver doesn't need
2217 * to take any further steps so just set the protocol
2218 * to the encapsulated ethertype.
2219 */
2220 skb->protocol = vlan_get_protocol(skb);
2221 goto out;
2222 }
2223
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002224 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002225 if (skb_vlan_tag_present(skb)) {
2226 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002227 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2228 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002229 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002230 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002231
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002232 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2233 if (!vhdr)
2234 return -EINVAL;
2235
2236 protocol = vhdr->h_vlan_encapsulated_proto;
2237 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2238 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2239 }
2240
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002241 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2242 goto out;
2243
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002244 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002245 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2246 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002247 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2248 tx_flags |= (skb->priority & 0x7) <<
2249 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2250 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2251 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002252 int rc;
2253
2254 rc = skb_cow_head(skb, 0);
2255 if (rc < 0)
2256 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002257 vhdr = (struct vlan_ethhdr *)skb->data;
2258 vhdr->h_vlan_TCI = htons(tx_flags >>
2259 I40E_TX_FLAGS_VLAN_SHIFT);
2260 } else {
2261 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2262 }
2263 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002264
2265out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002266 *flags = tx_flags;
2267 return 0;
2268}
2269
2270/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002271 * i40e_tso - set up the tso context descriptor
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002272 * @first: pointer to first Tx buffer for xmit
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002273 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002274 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002275 *
2276 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2277 **/
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002278static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2279 u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002280{
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002281 struct sk_buff *skb = first->skb;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002282 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002283 union {
2284 struct iphdr *v4;
2285 struct ipv6hdr *v6;
2286 unsigned char *hdr;
2287 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002288 union {
2289 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002290 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002291 unsigned char *hdr;
2292 } l4;
2293 u32 paylen, l4_offset;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002294 u16 gso_segs, gso_size;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002295 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002296
Shannon Nelsone9f65632016-01-04 10:33:04 -08002297 if (skb->ip_summed != CHECKSUM_PARTIAL)
2298 return 0;
2299
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002300 if (!skb_is_gso(skb))
2301 return 0;
2302
Francois Romieudd225bc2014-03-30 03:14:48 +00002303 err = skb_cow_head(skb, 0);
2304 if (err < 0)
2305 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002306
Alexander Duyckc7770192016-01-24 21:16:35 -08002307 ip.hdr = skb_network_header(skb);
2308 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002309
Alexander Duyckc7770192016-01-24 21:16:35 -08002310 /* initialize outer IP header fields */
2311 if (ip.v4->version == 4) {
2312 ip.v4->tot_len = 0;
2313 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002314 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002315 ip.v6->payload_len = 0;
2316 }
2317
Alexander Duyck577389a2016-04-02 00:06:56 -07002318 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002319 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002320 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002321 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07002322 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002323 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002324 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2325 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2326 l4.udp->len = 0;
2327
Alexander Duyck54532052016-01-24 21:17:29 -08002328 /* determine offset of outer transport header */
2329 l4_offset = l4.hdr - skb->data;
2330
2331 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002332 paylen = skb->len - l4_offset;
2333 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002334 }
2335
Alexander Duyckc7770192016-01-24 21:16:35 -08002336 /* reset pointers to inner headers */
2337 ip.hdr = skb_inner_network_header(skb);
2338 l4.hdr = skb_inner_transport_header(skb);
2339
2340 /* initialize inner IP header fields */
2341 if (ip.v4->version == 4) {
2342 ip.v4->tot_len = 0;
2343 ip.v4->check = 0;
2344 } else {
2345 ip.v6->payload_len = 0;
2346 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002347 }
2348
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002349 /* determine offset of inner transport header */
2350 l4_offset = l4.hdr - skb->data;
2351
2352 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002353 paylen = skb->len - l4_offset;
2354 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002355
2356 /* compute length of segmentation header */
2357 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002358
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002359 /* pull values out of skb_shinfo */
2360 gso_size = skb_shinfo(skb)->gso_size;
2361 gso_segs = skb_shinfo(skb)->gso_segs;
2362
2363 /* update GSO size and bytecount with header size */
2364 first->gso_segs = gso_segs;
2365 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2366
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002367 /* find the field values */
2368 cd_cmd = I40E_TX_CTX_DESC_TSO;
2369 cd_tso_len = skb->len - *hdr_len;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002370 cd_mss = gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002371 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2372 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2373 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002374 return 1;
2375}
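
/* Worked example (illustrative only): a 7306 byte skb made up of 66
 * bytes of headers and 7240 bytes of TCP payload with gso_size 1448 is
 * reported by the stack as gso_segs = 5. Starting from a bytecount of
 * skb->len, the adjustment above adds (5 - 1) * 66 = 264, giving 7570
 * bytes, i.e. 5 frames of 1514 bytes each as they will appear on the
 * wire, and cd_tso_len is skb->len - hdr_len = 7240, the payload the
 * hardware will segment.
 */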
2376
2377/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002378 * i40e_tsyn - set up the tsyn context descriptor
2379 * @tx_ring: ptr to the ring to send
2380 * @skb: ptr to the skb we're sending
2381 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002382 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002383 *
2384 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2385 **/
2386static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2387 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2388{
2389 struct i40e_pf *pf;
2390
2391 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2392 return 0;
2393
2394 /* Tx timestamps cannot be sampled when doing TSO */
2395 if (tx_flags & I40E_TX_FLAGS_TSO)
2396 return 0;
2397
2398 /* only timestamp the outbound packet if the user has requested it and
2399 * we are not already transmitting a packet to be timestamped
2400 */
2401 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002402 if (!(pf->flags & I40E_FLAG_PTP))
2403 return 0;
2404
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002405 if (pf->ptp_tx &&
2406 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002407 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2408 pf->ptp_tx_skb = skb_get(skb);
2409 } else {
2410 return 0;
2411 }
2412
2413 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2414 I40E_TXD_CTX_QW1_CMD_SHIFT;
2415
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002416 return 1;
2417}
2418
2419/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002420 * i40e_tx_enable_csum - Enable Tx checksum offloads
2421 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002422 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002423 * @td_cmd: Tx descriptor command bits to set
2424 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06002425 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002426 * @cd_tunneling: ptr to context desc bits
2427 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08002428static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2429 u32 *td_cmd, u32 *td_offset,
2430 struct i40e_ring *tx_ring,
2431 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002432{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002433 union {
2434 struct iphdr *v4;
2435 struct ipv6hdr *v6;
2436 unsigned char *hdr;
2437 } ip;
2438 union {
2439 struct tcphdr *tcp;
2440 struct udphdr *udp;
2441 unsigned char *hdr;
2442 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002443 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002444 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002445 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002446 u8 l4_proto = 0;
2447
Alexander Duyck529f1f62016-01-24 21:17:10 -08002448 if (skb->ip_summed != CHECKSUM_PARTIAL)
2449 return 0;
2450
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002451 ip.hdr = skb_network_header(skb);
2452 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002453
Alexander Duyck475b4202016-01-24 21:17:01 -08002454 /* compute outer L2 header size */
2455 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2456
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002457 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002458 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08002459 /* define outer network header type */
2460 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002461 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2462 I40E_TX_CTX_EXT_IP_IPV4 :
2463 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2464
Alexander Duycka0064722016-01-24 21:16:48 -08002465 l4_proto = ip.v4->protocol;
2466 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002467 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002468
2469 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08002470 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002471 if (l4.hdr != exthdr)
2472 ipv6_skip_exthdr(skb, exthdr - skb->data,
2473 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08002474 }
2475
2476 /* define outer transport */
2477 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002478 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08002479 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002480 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002481 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002482 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08002483 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08002484 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002485 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07002486 case IPPROTO_IPIP:
2487 case IPPROTO_IPV6:
2488 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2489 l4.hdr = skb_inner_network_header(skb);
2490 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002491 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002492 if (*tx_flags & I40E_TX_FLAGS_TSO)
2493 return -1;
2494
2495 skb_checksum_help(skb);
2496 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002497 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002498
Alexander Duyck577389a2016-04-02 00:06:56 -07002499 /* compute outer L3 header size */
2500 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2501 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
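		/* EXT_IPLEN is in 4-byte words, e.g. a 20-byte outer IPv4
		 * header with no options encodes as 5.
		 */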
2502
2503 /* switch IP header pointer from outer to inner header */
2504 ip.hdr = skb_inner_network_header(skb);
2505
Alexander Duyck475b4202016-01-24 21:17:01 -08002506 /* compute tunnel header size */
2507 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2508 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
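		/* NATLEN is in 2-byte words and spans everything from the
		 * outer L4 header up to the inner IP header; e.g. for a VXLAN
		 * packet that is 8-byte outer UDP + 8-byte VXLAN header +
		 * 14-byte inner Ethernet header = 30 bytes, encoded as 15.
		 */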
2509
Alexander Duyck54532052016-01-24 21:17:29 -08002510 /* indicate if we need to offload outer UDP header */
2511 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002512 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08002513 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2514 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2515
Alexander Duyck475b4202016-01-24 21:17:01 -08002516 /* record tunnel offload values */
2517 *cd_tunneling |= tunnel;
2518
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002519 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002520 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08002521 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002522
Alexander Duycka0064722016-01-24 21:16:48 -08002523 /* reset type as we transition from outer to inner headers */
2524 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
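		/* ip.v4->version and ip.v6->version both read the same 4-bit
		 * version field in the first byte of the inner header, so
		 * exactly one of the two tests below can match.
		 */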
2525 if (ip.v4->version == 4)
2526 *tx_flags |= I40E_TX_FLAGS_IPV4;
2527 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002528 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002529 }
2530
2531 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002532 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002533 l4_proto = ip.v4->protocol;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002534		/* the stack has already computed the IP header checksum; the only
 2535		 * time we need the hardware to recompute it is in the case of TSO.
2536 */
Alexander Duyck475b4202016-01-24 21:17:01 -08002537 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2538 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2539 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002540 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002541 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002542
2543 exthdr = ip.hdr + sizeof(*ip.v6);
2544 l4_proto = ip.v6->nexthdr;
2545 if (l4.hdr != exthdr)
2546 ipv6_skip_exthdr(skb, exthdr - skb->data,
2547 &l4_proto, &frag_off);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002548 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002549
Alexander Duyck475b4202016-01-24 21:17:01 -08002550 /* compute inner L3 header size */
2551 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
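	/* IPLEN is in 4-byte words: a 20-byte IPv4 header with no options
	 * encodes as 5, a 40-byte IPv6 header as 10.
	 */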
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002552
2553 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002554 switch (l4_proto) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002555 case IPPROTO_TCP:
2556 /* enable checksum offloads */
Alexander Duyck475b4202016-01-24 21:17:01 -08002557 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2558 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
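		/* doff is already expressed in 4-byte words, e.g. 5 for a
		 * 20-byte TCP header with no options.
		 */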
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002559 break;
2560 case IPPROTO_SCTP:
2561 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002562 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2563 offset |= (sizeof(struct sctphdr) >> 2) <<
2564 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002565 break;
2566 case IPPROTO_UDP:
2567 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002568 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2569 offset |= (sizeof(struct udphdr) >> 2) <<
2570 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002571 break;
2572 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002573 if (*tx_flags & I40E_TX_FLAGS_TSO)
2574 return -1;
2575 skb_checksum_help(skb);
2576 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002577 }
Alexander Duyck475b4202016-01-24 21:17:01 -08002578
2579 *td_cmd |= cmd;
2580 *td_offset |= offset;
Alexander Duyck529f1f62016-01-24 21:17:10 -08002581
2582 return 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002583}
2584
2585/**
 2586 * i40e_create_tx_ctx - Build the Tx context descriptor
2587 * @tx_ring: ring to create the descriptor on
2588 * @cd_type_cmd_tso_mss: Quad Word 1
2589 * @cd_tunneling: Quad Word 0 - bits 0-31
2590 * @cd_l2tag2: Quad Word 0 - bits 32-63
2591 **/
2592static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2593 const u64 cd_type_cmd_tso_mss,
2594 const u32 cd_tunneling, const u32 cd_l2tag2)
2595{
2596 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002597 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002598
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002599 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2600 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002601 return;
2602
2603 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002604 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2605
2606 i++;
2607 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002608
2609 /* cpu_to_le32 and assign to struct fields */
2610 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2611 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002612 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002613 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2614}
2615
2616/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002617 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2618 * @tx_ring: the ring to be checked
 2619 * @size: the number of descriptors we want to assure are available
2620 *
2621 * Returns -EBUSY if a stop is needed, else 0
2622 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002623int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002624{
2625 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2626 /* Memory barrier before checking head and tail */
2627 smp_mb();
2628
2629 /* Check again in a case another CPU has just made room available. */
2630 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2631 return -EBUSY;
2632
2633 /* A reprieve! - use start_queue because it doesn't call schedule */
2634 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2635 ++tx_ring->tx_stats.restart_queue;
2636 return 0;
2637}
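/* Callers are expected to reach this slow path via a first-level check
 * (i40e_maybe_stop_tx()) that compares I40E_DESC_UNUSED() against the
 * descriptors needed, so the subqueue is only stopped when the ring is
 * genuinely close to full.
 */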
2638
2639/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002640 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00002641 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00002642 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002643 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2644 * and so we need to figure out the cases where we need to linearize the skb.
2645 *
2646 * For TSO we need to count the TSO header and segment payload separately.
2647 * As such we need to check cases where we have 7 fragments or more as we
2648 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2649 * the segment payload in the first descriptor, and another 7 for the
2650 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00002651 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08002652bool __i40e_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00002653{
Alexander Duyck2d374902016-02-17 11:02:50 -08002654 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002655 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00002656
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002657 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08002658 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002659 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08002660 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002661
Alexander Duyck2d374902016-02-17 11:02:50 -08002662 /* We need to walk through the list and validate that each group
Alexander Duyck841493a2016-09-06 18:05:04 -07002663 * of 6 fragments totals at least gso_size.
Alexander Duyck2d374902016-02-17 11:02:50 -08002664 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002665 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08002666 frag = &skb_shinfo(skb)->frags[0];
2667
 2668	/* Initialize sum to the negative value of gso_size minus 1. We
 2669	 * use this as the worst case scenario in which the frag ahead
 2670	 * of us only provides one byte, which is why we are limited to 6
 2671	 * descriptors for a single transmit as the header and previous
 2672	 * fragment are already consuming 2 descriptors.
 2673	 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002674 sum = 1 - skb_shinfo(skb)->gso_size;
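	/* For example, with gso_size = 9000 and eight 1400-byte frags, any
	 * six consecutive frags total only 8400 bytes, so sum goes negative
	 * on the first pass through the loop below and we report that the
	 * skb needs to be linearized.
	 */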
Alexander Duyck2d374902016-02-17 11:02:50 -08002675
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002676 /* Add size of frags 0 through 4 to create our initial sum */
2677 sum += skb_frag_size(frag++);
2678 sum += skb_frag_size(frag++);
2679 sum += skb_frag_size(frag++);
2680 sum += skb_frag_size(frag++);
2681 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002682
2683 /* Walk through fragments adding latest fragment, testing it, and
2684 * then removing stale fragments from the sum.
2685 */
2686 stale = &skb_shinfo(skb)->frags[0];
2687 for (;;) {
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002688 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002689
2690 /* if sum is negative we failed to make sufficient progress */
2691 if (sum < 0)
2692 return true;
2693
Alexander Duyck841493a2016-09-06 18:05:04 -07002694 if (!nr_frags--)
Alexander Duyck2d374902016-02-17 11:02:50 -08002695 break;
2696
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002697 sum -= skb_frag_size(stale++);
Anjali Singhai71da6192015-02-21 06:42:35 +00002698 }
2699
Alexander Duyck2d374902016-02-17 11:02:50 -08002700 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002701}
2702
2703/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002704 * i40e_tx_map - Build the Tx descriptor
2705 * @tx_ring: ring to send buffer on
2706 * @skb: send buffer
2707 * @first: first buffer info buffer to use
2708 * @tx_flags: collected send information
2709 * @hdr_len: size of the packet header
2710 * @td_cmd: the command field in the descriptor
2711 * @td_offset: offset for checksum or crc
2712 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002713#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002714inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002715 struct i40e_tx_buffer *first, u32 tx_flags,
2716 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002717#else
2718static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2719 struct i40e_tx_buffer *first, u32 tx_flags,
2720 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002721#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002722{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002723 unsigned int data_len = skb->data_len;
2724 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002725 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002726 struct i40e_tx_buffer *tx_bi;
2727 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002728 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002729 u32 td_tag = 0;
2730 dma_addr_t dma;
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002731 u16 desc_count = 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002732
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002733 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2734 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2735 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2736 I40E_TX_FLAGS_VLAN_SHIFT;
2737 }
2738
Alexander Duycka5e9c572013-09-28 06:00:27 +00002739 first->tx_flags = tx_flags;
2740
2741 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2742
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002743 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002744 tx_bi = first;
2745
2746 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002747 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2748
Alexander Duycka5e9c572013-09-28 06:00:27 +00002749 if (dma_mapping_error(tx_ring->dev, dma))
2750 goto dma_error;
2751
2752 /* record length, and DMA address */
2753 dma_unmap_len_set(tx_bi, len, size);
2754 dma_unmap_addr_set(tx_bi, dma, dma);
2755
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002756 /* align size to end of page */
2757 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
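		/* -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the distance from dma
		 * to the next read request boundary, so the first chunk of an
		 * oversized buffer ends exactly on that boundary; e.g. with a
		 * 4K request size and dma ending in 0x400, 3072 bytes are
		 * added.
		 */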
Alexander Duycka5e9c572013-09-28 06:00:27 +00002758 tx_desc->buffer_addr = cpu_to_le64(dma);
2759
2760 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002761 tx_desc->cmd_type_offset_bsz =
2762 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002763 max_data, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002764
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002765 tx_desc++;
2766 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002767 desc_count++;
2768
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002769 if (i == tx_ring->count) {
2770 tx_desc = I40E_TX_DESC(tx_ring, 0);
2771 i = 0;
2772 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002773
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002774 dma += max_data;
2775 size -= max_data;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002776
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002777 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002778 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002779 }
2780
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002781 if (likely(!data_len))
2782 break;
2783
Alexander Duycka5e9c572013-09-28 06:00:27 +00002784 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2785 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002786
2787 tx_desc++;
2788 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002789 desc_count++;
2790
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002791 if (i == tx_ring->count) {
2792 tx_desc = I40E_TX_DESC(tx_ring, 0);
2793 i = 0;
2794 }
2795
Alexander Duycka5e9c572013-09-28 06:00:27 +00002796 size = skb_frag_size(frag);
2797 data_len -= size;
2798
2799 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2800 DMA_TO_DEVICE);
2801
2802 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002803 }
2804
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002805 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002806
2807 i++;
2808 if (i == tx_ring->count)
2809 i = 0;
2810
2811 tx_ring->next_to_use = i;
2812
Eric Dumazet4567dc12014-10-07 13:30:23 -07002813 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07002814
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002815 /* write last descriptor with EOP bit */
2816 td_cmd |= I40E_TX_DESC_CMD_EOP;
2817
 2818	/* We can OR these values together: both are only ever checked against
 2819	 * 4 below, and values below 4 fit in the low two bits, so the OR cannot
 2820	 * cross that threshold; past this block desc_count is just a boolean.
 2821	 */
2822 desc_count |= ++tx_ring->packet_stride;
2823
Anjali Singhai58044742015-09-25 18:26:13 -07002824 /* Algorithm to optimize tail and RS bit setting:
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002825 * if queue is stopped
2826 * mark RS bit
2827 * reset packet counter
2828 * else if xmit_more is supported and is true
2829 * advance packet counter to 4
2830 * reset desc_count to 0
Anjali Singhai58044742015-09-25 18:26:13 -07002831 *
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002832 * if desc_count >= 4
2833 * mark RS bit
2834 * reset packet counter
2835 * if desc_count > 0
2836 * update tail
Anjali Singhai58044742015-09-25 18:26:13 -07002837 *
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002838	 * Note: If there are fewer than 4 descriptors
Anjali Singhai58044742015-09-25 18:26:13 -07002839 * pending and interrupts were disabled the service task will
2840 * trigger a force WB.
2841 */
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002842 if (netif_xmit_stopped(txring_txq(tx_ring))) {
2843 goto do_rs;
2844 } else if (skb->xmit_more) {
2845 /* set stride to arm on next packet and reset desc_count */
2846 tx_ring->packet_stride = WB_STRIDE;
2847 desc_count = 0;
2848 } else if (desc_count >= WB_STRIDE) {
2849do_rs:
2850 /* write last descriptor with RS bit set */
2851 td_cmd |= I40E_TX_DESC_CMD_RS;
Anjali Singhai58044742015-09-25 18:26:13 -07002852 tx_ring->packet_stride = 0;
Anjali Singhai58044742015-09-25 18:26:13 -07002853 }
Anjali Singhai58044742015-09-25 18:26:13 -07002854
2855 tx_desc->cmd_type_offset_bsz =
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002856 build_ctob(td_cmd, td_offset, size, td_tag);
2857
2858 /* Force memory writes to complete before letting h/w know there
2859 * are new descriptors to fetch.
2860 *
2861 * We also use this memory barrier to make certain all of the
2862 * status bits have been updated before next_to_watch is written.
2863 */
2864 wmb();
2865
2866 /* set next_to_watch value indicating a packet is present */
2867 first->next_to_watch = tx_desc;
Anjali Singhai58044742015-09-25 18:26:13 -07002868
Alexander Duycka5e9c572013-09-28 06:00:27 +00002869 /* notify HW of packet */
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002870 if (desc_count) {
Anjali Singhai58044742015-09-25 18:26:13 -07002871 writel(i, tx_ring->tail);
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002872
2873 /* we need this if more than one processor can write to our tail
 2874		 * at a time; it synchronizes IO on IA64/Altix systems
2875 */
2876 mmiowb();
Anjali Singhai58044742015-09-25 18:26:13 -07002877 }
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002878
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002879 return;
2880
2881dma_error:
Alexander Duycka5e9c572013-09-28 06:00:27 +00002882 dev_info(tx_ring->dev, "TX DMA map failed\n");
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002883
2884 /* clear dma mappings for failed tx_bi map */
2885 for (;;) {
2886 tx_bi = &tx_ring->tx_bi[i];
Alexander Duycka5e9c572013-09-28 06:00:27 +00002887 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002888 if (tx_bi == first)
2889 break;
2890 if (i == 0)
2891 i = tx_ring->count;
2892 i--;
2893 }
2894
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002895 tx_ring->next_to_use = i;
2896}
2897
2898/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002899 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2900 * @skb: send buffer
2901 * @tx_ring: ring to send buffer on
2902 *
2903 * Returns NETDEV_TX_OK if sent, else an error code
2904 **/
2905static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2906 struct i40e_ring *tx_ring)
2907{
2908 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2909 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2910 struct i40e_tx_buffer *first;
2911 u32 td_offset = 0;
2912 u32 tx_flags = 0;
2913 __be16 protocol;
2914 u32 td_cmd = 0;
2915 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002916 int tso, count;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002917 int tsyn;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002918
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04002919 /* prefetch the data, we'll need it later */
2920 prefetch(skb->data);
2921
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002922 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08002923 if (i40e_chk_linearize(skb, count)) {
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002924 if (__skb_linearize(skb)) {
2925 dev_kfree_skb_any(skb);
2926 return NETDEV_TX_OK;
2927 }
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002928 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08002929 tx_ring->tx_stats.tx_linearize++;
2930 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002931
2932 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2933 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2934 * + 4 desc gap to avoid the cache line where head is,
2935 * + 1 desc for context descriptor,
2936 * otherwise try next time
2937 */
2938 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2939 tx_ring->tx_stats.tx_busy++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002940 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002941 }
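	/* For example, a non-TSO skb with a short linear head and four small
	 * page fragments needs count = 5 data descriptors, so 5 + 4 + 1 = 10
	 * free descriptors are required before we continue.
	 */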
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002942
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002943 /* record the location of the first descriptor for this packet */
2944 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2945 first->skb = skb;
2946 first->bytecount = skb->len;
2947 first->gso_segs = 1;
2948
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002949 /* prepare the xmit flags */
2950 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2951 goto out_drop;
2952
2953 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04002954 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002955
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002956 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002957 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002958 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002959 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002960 tx_flags |= I40E_TX_FLAGS_IPV6;
2961
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002962 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002963
2964 if (tso < 0)
2965 goto out_drop;
2966 else if (tso)
2967 tx_flags |= I40E_TX_FLAGS_TSO;
2968
Alexander Duyck3bc67972016-02-17 11:02:56 -08002969 /* Always offload the checksum, since it's in the data descriptor */
2970 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2971 tx_ring, &cd_tunneling);
2972 if (tso < 0)
2973 goto out_drop;
2974
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002975 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2976
2977 if (tsyn)
2978 tx_flags |= I40E_TX_FLAGS_TSYN;
2979
Jakub Kicinski259afec2014-03-15 14:55:37 +00002980 skb_tx_timestamp(skb);
2981
Alexander Duyckb1941302013-09-28 06:00:32 +00002982 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002983 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2984
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002985 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2986 cd_tunneling, cd_l2tag2);
2987
2988 /* Add Flow Director ATR if it's enabled.
2989 *
2990 * NOTE: this must always be directly before the data descriptor.
2991 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002992 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002993
2994 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2995 td_cmd, td_offset);
2996
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002997 return NETDEV_TX_OK;
2998
2999out_drop:
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003000 dev_kfree_skb_any(first->skb);
3001 first->skb = NULL;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003002 return NETDEV_TX_OK;
3003}
3004
3005/**
3006 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3007 * @skb: send buffer
3008 * @netdev: network interface device structure
3009 *
3010 * Returns NETDEV_TX_OK if sent, else an error code
3011 **/
3012netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3013{
3014 struct i40e_netdev_priv *np = netdev_priv(netdev);
3015 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e152013-09-28 06:00:58 +00003016 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003017
3018 /* hardware can't handle really short frames, hardware padding works
3019 * beyond this point
3020 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08003021 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3022 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003023
3024 return i40e_xmit_frame_ring(skb, tx_ring);
3025}