/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
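/* max number of 1 msec waits for free FDir descriptors before giving up */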
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

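/* start of the IPv4 header within the dummy packets below, i.e. just past
 * the 14-byte Ethernet header
 */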
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
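	/* Dummy UDP/IPv4 packet: Ethernet, IPv4 and UDP headers; the
	 * addresses and ports are filled in below.
	 */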
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
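	/* Dummy IPv4-only packet: Ethernet header plus a bare IPv4 header,
	 * no L4 payload; the addresses and protocol are set below.
	 */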
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
							I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @tx_ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

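/* arm a descriptor write-back when no more than WB_STRIDE descriptors are
 * still pending on the ring (see i40e_clean_tx_irq)
 */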
#define WB_STRIDE 0x3

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

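/* wake a stopped Tx queue once at least this many descriptors are free */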
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (!rx_bi->page)
			continue;

		dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_pages(rx_bi->page, 0);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 *
 * skb->protocol must be set before this function is called
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001327 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001328 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001329
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001330 /* handle packets that were not able to be checksummed due
1331 * to arrival speed, in this case the stack can compute
1332 * the csum.
1333 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001334 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001335 return;
1336
Alexander Duyck858296c2016-06-14 15:45:42 -07001337 /* If there is an outer header present that might contain a checksum
1338 * we need to bump the checksum level by 1 to reflect the fact that
1339 * we are indicating we validated the inner checksum.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001340 */
Alexander Duyck858296c2016-06-14 15:45:42 -07001341 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1342 skb->csum_level = 1;
Alexander Duyckfad57332016-01-24 21:17:22 -08001343
Alexander Duyck858296c2016-06-14 15:45:42 -07001344 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1345 switch (decoded.inner_prot) {
1346 case I40E_RX_PTYPE_INNER_PROT_TCP:
1347 case I40E_RX_PTYPE_INNER_PROT_UDP:
1348 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1349 skb->ip_summed = CHECKSUM_UNNECESSARY;
 1350 /* fall through */
1351 default:
1352 break;
1353 }
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001354
1355 return;
1356
1357checksum_fail:
1358 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001359}
1360
1361/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001362 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001363 * @ptype: the ptype value from the descriptor
1364 *
1365 * Returns a hash type to be used by skb_set_hash
1366 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001367static inline int i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001368{
1369 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1370
1371 if (!decoded.known)
1372 return PKT_HASH_TYPE_NONE;
1373
1374 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1375 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1376 return PKT_HASH_TYPE_L4;
1377 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1378 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1379 return PKT_HASH_TYPE_L3;
1380 else
1381 return PKT_HASH_TYPE_L2;
1382}
1383
1384/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001385 * i40e_rx_hash - set the hash value in the skb
1386 * @ring: descriptor ring
1387 * @rx_desc: specific descriptor
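 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware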
1388 **/
1389static inline void i40e_rx_hash(struct i40e_ring *ring,
1390 union i40e_rx_desc *rx_desc,
1391 struct sk_buff *skb,
1392 u8 rx_ptype)
1393{
1394 u32 hash;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001395 const __le64 rss_mask =
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001396 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1397 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1398
Mitch Williamsa876c3b2016-05-03 15:13:18 -07001399 if (!(ring->netdev->features & NETIF_F_RXHASH))
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001400 return;
1401
1402 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1403 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1404 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1405 }
1406}
1407
1408/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001409 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1410 * @rx_ring: rx descriptor ring packet is being transacted on
1411 * @rx_desc: pointer to the EOP Rx descriptor
1412 * @skb: pointer to current skb being populated
1413 * @rx_ptype: the packet type decoded by hardware
Mitch Williamsa132af22015-01-24 09:58:35 +00001414 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001415 * This function checks the ring, descriptor, and packet information in
1416 * order to populate the hash, checksum, VLAN, protocol, and
1417 * other fields within the skb.
Mitch Williamsa132af22015-01-24 09:58:35 +00001418 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001419static inline
1420void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1421 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1422 u8 rx_ptype)
1423{
1424 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1425 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1426 I40E_RXD_QW1_STATUS_SHIFT;
1427 u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1428 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1429
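	/* a nonzero TSYN index means hardware latched an Rx timestamp for
	 * this packet, so retrieve it and note when it was seen
	 */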
1430 if (unlikely(rsyn)) {
1431 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
1432 rx_ring->last_rx_timestamp = jiffies;
1433 }
1434
1435 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1436
1437 /* modifies the skb - consumes the enet header */
1438 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1439
1440 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1441
1442 skb_record_rx_queue(skb, rx_ring->queue_index);
1443}
1444
1445/**
1446 * i40e_pull_tail - i40e specific version of skb_pull_tail
1447 * @rx_ring: rx descriptor ring packet is being transacted on
1448 * @skb: pointer to current skb being adjusted
1449 *
1450 * This function is an i40e specific version of __pskb_pull_tail. The
1451 * main difference between this version and the original function is that
1452 * this function can make several assumptions about the state of things
1453 * that allow for significant optimizations versus the standard function.
1454 * As a result we can do things like drop a frag and maintain an accurate
1455 * truesize for the skb.
1456 */
1457static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
1458{
1459 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1460 unsigned char *va;
1461 unsigned int pull_len;
1462
1463 /* it is valid to use page_address instead of kmap since we are
 1464 * working with pages allocated out of the lowmem pool per
1465 * alloc_page(GFP_ATOMIC)
1466 */
1467 va = skb_frag_address(frag);
1468
1469 /* we need the header to contain the greater of either ETH_HLEN or
1470 * 60 bytes if the skb->len is less than 60 for skb_pad.
1471 */
1472 pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
1473
1474 /* align pull length to size of long to optimize memcpy performance */
1475 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
1476
1477 /* update all of the pointers */
1478 skb_frag_size_sub(frag, pull_len);
1479 frag->page_offset += pull_len;
1480 skb->data_len -= pull_len;
1481 skb->tail += pull_len;
1482}
1483
1484/**
1485 * i40e_cleanup_headers - Correct empty headers
1486 * @rx_ring: rx descriptor ring packet is being transacted on
1487 * @skb: pointer to current skb being fixed
1488 *
1489 * Also address the case where we are pulling data in on pages only
1490 * and as such no data is present in the skb header.
1491 *
1492 * In addition if skb is not at least 60 bytes we need to pad it so that
1493 * it is large enough to qualify as a valid Ethernet frame.
1494 *
1495 * Returns true if an error was encountered and skb was freed.
1496 **/
1497static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
1498{
1499 /* place header in linear portion of buffer */
1500 if (skb_is_nonlinear(skb))
1501 i40e_pull_tail(rx_ring, skb);
1502
1503 /* if eth_skb_pad returns an error the skb was freed */
1504 if (eth_skb_pad(skb))
1505 return true;
1506
1507 return false;
1508}
1509
1510/**
1511 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1512 * @rx_ring: rx descriptor ring to store buffers on
1513 * @old_buff: donor buffer to have page reused
1514 *
1515 * Synchronizes page for reuse by the adapter
1516 **/
1517static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1518 struct i40e_rx_buffer *old_buff)
1519{
1520 struct i40e_rx_buffer *new_buff;
1521 u16 nta = rx_ring->next_to_alloc;
1522
1523 new_buff = &rx_ring->rx_bi[nta];
1524
1525 /* update, and store next to alloc */
1526 nta++;
1527 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1528
1529 /* transfer page from old buffer to new buffer */
1530 *new_buff = *old_buff;
1531}
1532
1533/**
1534 * i40e_page_is_reserved - check if reuse is possible
1535 * @page: page struct to check
1536 */
1537static inline bool i40e_page_is_reserved(struct page *page)
1538{
1539 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1540}
1541
1542/**
1543 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1544 * @rx_ring: rx descriptor ring to transact packets on
1545 * @rx_buffer: buffer containing page to add
1546 * @rx_desc: descriptor containing length of buffer written by hardware
1547 * @skb: sk_buff to place the data into
1548 *
1549 * This function will add the data contained in rx_buffer->page to the skb.
1550 * This is done either through a direct copy if the data in the buffer is
1551 * less than the skb header size, otherwise it will just attach the page as
1552 * a frag to the skb.
1553 *
1554 * The function will then update the page offset if necessary and return
1555 * true if the buffer can be reused by the adapter.
1556 **/
1557static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
1558 struct i40e_rx_buffer *rx_buffer,
1559 union i40e_rx_desc *rx_desc,
1560 struct sk_buff *skb)
1561{
1562 struct page *page = rx_buffer->page;
1563 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1564 unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1565 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1566#if (PAGE_SIZE < 8192)
1567 unsigned int truesize = I40E_RXBUFFER_2048;
1568#else
1569 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
1570 unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
1571#endif
1572
1573 /* will the data fit in the skb we allocated? if so, just
1574 * copy it as it is pretty small anyway
1575 */
1576 if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
1577 unsigned char *va = page_address(page) + rx_buffer->page_offset;
1578
1579 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1580
1581 /* page is not reserved, we can reuse buffer as-is */
1582 if (likely(!i40e_page_is_reserved(page)))
1583 return true;
1584
1585 /* this page cannot be reused so discard it */
1586 __free_pages(page, 0);
1587 return false;
1588 }
1589
1590 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1591 rx_buffer->page_offset, size, truesize);
1592
1593 /* avoid re-using remote pages */
1594 if (unlikely(i40e_page_is_reserved(page)))
1595 return false;
1596
1597#if (PAGE_SIZE < 8192)
1598 /* if we are only owner of page we can reuse it */
1599 if (unlikely(page_count(page) != 1))
1600 return false;
1601
1602 /* flip page offset to other buffer */
1603 rx_buffer->page_offset ^= truesize;
1604#else
1605 /* move offset up to the next cache line */
1606 rx_buffer->page_offset += truesize;
1607
1608 if (rx_buffer->page_offset > last_offset)
1609 return false;
1610#endif
1611
1612 /* Even if we own the page, we are not allowed to use atomic_set()
1613 * This would break get_page_unless_zero() users.
1614 */
1615 get_page(rx_buffer->page);
1616
1617 return true;
1618}
1619
1620/**
1621 * i40e_fetch_rx_buffer - Allocate skb and populate it
1622 * @rx_ring: rx descriptor ring to transact packets on
1623 * @rx_desc: descriptor containing info written by hardware
1624 *
1625 * This function allocates an skb on the fly, and populates it with the page
1626 * data from the current receive descriptor, taking care to set up the skb
1627 * correctly, as well as handling calling the page recycle function if
1628 * necessary.
1629 */
1630static inline
1631struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
1632 union i40e_rx_desc *rx_desc)
1633{
1634 struct i40e_rx_buffer *rx_buffer;
1635 struct sk_buff *skb;
1636 struct page *page;
1637
1638 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1639 page = rx_buffer->page;
1640 prefetchw(page);
1641
1642 skb = rx_buffer->skb;
1643
1644 if (likely(!skb)) {
1645 void *page_addr = page_address(page) + rx_buffer->page_offset;
1646
1647 /* prefetch first cache line of first page */
1648 prefetch(page_addr);
1649#if L1_CACHE_BYTES < 128
1650 prefetch(page_addr + L1_CACHE_BYTES);
1651#endif
1652
1653 /* allocate a skb to store the frags */
1654 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1655 I40E_RX_HDR_SIZE,
1656 GFP_ATOMIC | __GFP_NOWARN);
1657 if (unlikely(!skb)) {
1658 rx_ring->rx_stats.alloc_buff_failed++;
1659 return NULL;
1660 }
1661
1662 /* we will be copying header into skb->data in
1663 * pskb_may_pull so it is in our interest to prefetch
1664 * it now to avoid a possible cache miss
1665 */
1666 prefetchw(skb->data);
1667 } else {
1668 rx_buffer->skb = NULL;
1669 }
1670
1671 /* we are reusing so sync this buffer for CPU use */
1672 dma_sync_single_range_for_cpu(rx_ring->dev,
1673 rx_buffer->dma,
1674 rx_buffer->page_offset,
1675 I40E_RXBUFFER_2048,
1676 DMA_FROM_DEVICE);
1677
1678 /* pull page into skb */
1679 if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
1680 /* hand second half of page back to the ring */
1681 i40e_reuse_rx_page(rx_ring, rx_buffer);
1682 rx_ring->rx_stats.page_reuse_count++;
1683 } else {
1684 /* we are not reusing the buffer so unmap it */
1685 dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1686 DMA_FROM_DEVICE);
1687 }
1688
1689 /* clear contents of buffer_info */
1690 rx_buffer->page = NULL;
1691
1692 return skb;
1693}
1694
1695/**
1696 * i40e_is_non_eop - process handling of non-EOP buffers
1697 * @rx_ring: Rx ring being processed
1698 * @rx_desc: Rx descriptor for current buffer
1699 * @skb: Current socket buffer containing buffer in progress
1700 *
1701 * This function updates next to clean. If the buffer is an EOP buffer
1702 * this function exits returning false, otherwise it will place the
1703 * sk_buff in the next buffer to be chained and return true indicating
1704 * that this is in fact a non-EOP buffer.
1705 **/
1706static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1707 union i40e_rx_desc *rx_desc,
1708 struct sk_buff *skb)
1709{
1710 u32 ntc = rx_ring->next_to_clean + 1;
1711
1712 /* fetch, update, and store next to clean */
1713 ntc = (ntc < rx_ring->count) ? ntc : 0;
1714 rx_ring->next_to_clean = ntc;
1715
1716 prefetch(I40E_RX_DESC(rx_ring, ntc));
1717
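	/* a descriptor carrying filter programming status has no packet data;
	 * consume it here and keep the in-progress skb for the next buffer
	 */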
1718#define staterrlen rx_desc->wb.qword1.status_error_len
1719 if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
1720 i40e_clean_programming_status(rx_ring, rx_desc);
1721 rx_ring->rx_bi[ntc].skb = skb;
1722 return true;
1723 }
1724 /* if we are the last buffer then there is nothing else to do */
1725#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1726 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1727 return false;
1728
1729 /* place skb in next buffer to be received */
1730 rx_ring->rx_bi[ntc].skb = skb;
1731 rx_ring->rx_stats.non_eop_descs++;
1732
1733 return true;
1734}
1735
1736/**
1737 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1738 * @rx_ring: rx descriptor ring to transact packets on
1739 * @budget: Total limit on number of packets to process
1740 *
1741 * This function provides a "bounce buffer" approach to Rx interrupt
1742 * processing. The advantage to this is that on systems that have
1743 * expensive overhead for IOMMU access this provides a means of avoiding
1744 * it by maintaining the mapping of the page to the system.
1745 *
1746 * Returns amount of work completed
1747 **/
1748static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
Mitch Williamsa132af22015-01-24 09:58:35 +00001749{
1750 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1751 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001752 bool failure = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001753
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001754 while (likely(total_rx_packets < budget)) {
1755 union i40e_rx_desc *rx_desc;
Mitch Williamsa132af22015-01-24 09:58:35 +00001756 struct sk_buff *skb;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001757 u32 rx_status;
Mitch Williamsa132af22015-01-24 09:58:35 +00001758 u16 vlan_tag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001759 u8 rx_ptype;
1760 u64 qword;
1761
Mitch Williamsa132af22015-01-24 09:58:35 +00001762 /* return some buffers to hardware, one at a time is too slow */
1763 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001764 failure = failure ||
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001765 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001766 cleaned_count = 0;
1767 }
1768
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001769 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1770
Mitch Williamsa132af22015-01-24 09:58:35 +00001771 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001772 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1773 I40E_RXD_QW1_PTYPE_SHIFT;
Mitch Williamsa132af22015-01-24 09:58:35 +00001774 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001775 I40E_RXD_QW1_STATUS_SHIFT;
Mitch Williamsa132af22015-01-24 09:58:35 +00001776
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001777 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001778 break;
1779
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001780 /* status_error_len will always be zero for unused descriptors
1781 * because it's cleared in cleanup, and overlaps with hdr_addr
1782 * which is always zero because packet split isn't used, if the
1783 * hardware wrote DD then it will be non-zero
1784 */
1785 if (!rx_desc->wb.qword1.status_error_len)
1786 break;
1787
Mitch Williamsa132af22015-01-24 09:58:35 +00001788 /* This memory barrier is needed to keep us from reading
1789 * any other fields out of the rx_desc until we know the
1790 * DD bit is set.
1791 */
Alexander Duyck67317162015-04-08 18:49:43 -07001792 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001793
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001794 skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
1795 if (!skb)
1796 break;
Mitch Williamsa132af22015-01-24 09:58:35 +00001797
Mitch Williamsa132af22015-01-24 09:58:35 +00001798 cleaned_count++;
1799
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001800 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
Mitch Williamsa132af22015-01-24 09:58:35 +00001801 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001802
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001803 /* ERR_MASK will only have valid bits if EOP set, and
1804 * what we are doing here is actually checking
1805 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1806 * the error field
1807 */
1808 if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001809 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001810 continue;
1811 }
1812
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001813 if (i40e_cleanup_headers(rx_ring, skb))
1814 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001815
1816 /* probably a little skewed due to removing CRC */
1817 total_rx_bytes += skb->len;
Mitch Williamsa132af22015-01-24 09:58:35 +00001818
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001819 /* populate checksum, VLAN, and protocol */
1820 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00001821
Mitch Williamsa132af22015-01-24 09:58:35 +00001822#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001823 if (unlikely(
1824 i40e_rx_is_fcoe(rx_ptype) &&
1825 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001826 dev_kfree_skb_any(skb);
1827 continue;
1828 }
1829#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001830
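		/* extract the VLAN tag if the descriptor flags L2TAG1 as present */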
1831 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1832 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1833
Mitch Williamsa132af22015-01-24 09:58:35 +00001834 i40e_receive_skb(rx_ring, skb, vlan_tag);
1835
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001836 /* update budget accounting */
1837 total_rx_packets++;
1838 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001839
1840 u64_stats_update_begin(&rx_ring->syncp);
1841 rx_ring->stats.packets += total_rx_packets;
1842 rx_ring->stats.bytes += total_rx_bytes;
1843 u64_stats_update_end(&rx_ring->syncp);
1844 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1845 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1846
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001847 /* guarantee a trip back through this routine if there was a failure */
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001848 return failure ? budget : total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001849}
1850
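/**
 * i40e_buildreg_itr - build a value for writing to the PFINT_DYN_CTLN register
 * @type: ITR index to encode (Rx, Tx, or none)
 * @itr: interval value to program
 *
 * The value keeps the interrupt enabled while updating the requested ITR.
 **/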
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001851static u32 i40e_buildreg_itr(const int type, const u16 itr)
1852{
1853 u32 val;
1854
1855 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08001856 /* Don't clear PBA because that can cause lost interrupts that
1857 * came in while we were cleaning/polling
1858 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001859 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1860 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1861
1862 return val;
1863}
1864
1865/* a small macro to shorten up some long lines */
1866#define INTREG I40E_PFINT_DYN_CTLN
1867
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001868/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001869 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1870 * @vsi: the VSI we care about
1871 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1872 *
1873 **/
1874static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1875 struct i40e_q_vector *q_vector)
1876{
1877 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001878 bool rx = false, tx = false;
1879 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001880 int vector;
Kan Lianga75e8002016-02-19 09:24:04 -05001881 int idx = q_vector->v_idx;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001882
1883 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001884
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001885 /* avoid dynamic calculation if in countdown mode OR if
 1886 * all dynamic ITR is disabled
1887 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001888 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1889
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001890 if (q_vector->itr_countdown > 0 ||
Kan Lianga75e8002016-02-19 09:24:04 -05001891 (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
1892 !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001893 goto enable_int;
1894 }
1895
Kan Lianga75e8002016-02-19 09:24:04 -05001896 if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001897 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1898 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001899 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001900
Kan Lianga75e8002016-02-19 09:24:04 -05001901 if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001902 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1903 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001904 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001905
1906 if (rx || tx) {
1907 /* get the higher of the two ITR adjustments and
1908 * use the same value for both ITR registers
1909 * when in adaptive mode (Rx and/or Tx)
1910 */
1911 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1912
1913 q_vector->tx.itr = q_vector->rx.itr = itr;
1914 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1915 tx = true;
1916 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1917 rx = true;
1918 }
1919
1920 /* only need to enable the interrupt once, but need
1921 * to possibly update both ITR values
1922 */
1923 if (rx) {
1924 /* set the INTENA_MSK_MASK so that this first write
1925 * won't actually enable the interrupt, instead just
1926 * updating the ITR (it's bit 31 PF and VF)
1927 */
1928 rxval |= BIT(31);
1929 /* don't check _DOWN because interrupt isn't being enabled */
1930 wr32(hw, INTREG(vector - 1), rxval);
1931 }
1932
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001933enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001934 if (!test_bit(__I40E_DOWN, &vsi->state))
1935 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001936
1937 if (q_vector->itr_countdown)
1938 q_vector->itr_countdown--;
1939 else
1940 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001941}
1942
1943/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001944 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1945 * @napi: napi struct with our devices info in it
1946 * @budget: amount of work driver is allowed to do this pass, in packets
1947 *
1948 * This function will clean all queues associated with a q_vector.
1949 *
1950 * Returns the amount of work done
1951 **/
1952int i40e_napi_poll(struct napi_struct *napi, int budget)
1953{
1954 struct i40e_q_vector *q_vector =
1955 container_of(napi, struct i40e_q_vector, napi);
1956 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001957 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001958 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001959 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001960 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001961 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001962
1963 if (test_bit(__I40E_DOWN, &vsi->state)) {
1964 napi_complete(napi);
1965 return 0;
1966 }
1967
Kiran Patil9c6c1252015-11-06 15:26:02 -08001968 /* Clear hung_detected bit */
1969 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001970 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001971 * budget and be more aggressive about cleaning up the Tx descriptors.
1972 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001973 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08001974 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001975 clean_complete = false;
1976 continue;
1977 }
1978 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001979 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001980 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001981
Alexander Duyckc67cace2015-09-24 09:04:26 -07001982 /* Handle case where we are called by netpoll with a budget of 0 */
1983 if (budget <= 0)
1984 goto tx_only;
1985
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001986 /* We attempt to distribute budget to each Rx queue fairly, but don't
1987 * allow the budget to go below 1 because that would exit polling early.
1988 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001989 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001990
Mitch Williamsa132af22015-01-24 09:58:35 +00001991 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001992 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001993
1994 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001995 /* if we clean as many as budgeted, we must not be done */
1996 if (cleaned >= budget_per_ring)
1997 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001998 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001999
2000 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002001 if (!clean_complete) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07002002tx_only:
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002003 if (arm_wb) {
2004 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
Anjali Singhai Jainecc6a232016-01-13 16:51:43 -08002005 i40e_enable_wb_on_itr(vsi, q_vector);
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002006 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002007 return budget;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002008 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002009
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002010 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2011 q_vector->arm_wb_state = false;
2012
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002013 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002014 napi_complete_done(napi, work_done);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002015 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
2016 i40e_update_enable_itr(vsi, q_vector);
2017 } else { /* Legacy mode */
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002018 i40e_irq_dynamic_enable_icr0(vsi->back, false);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002019 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002020 return 0;
2021}
2022
2023/**
2024 * i40e_atr - Add a Flow Director ATR filter
2025 * @tx_ring: ring to add programming descriptor to
2026 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002027 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002028 **/
2029static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002030 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002031{
2032 struct i40e_filter_program_desc *fdir_desc;
2033 struct i40e_pf *pf = tx_ring->vsi->back;
2034 union {
2035 unsigned char *network;
2036 struct iphdr *ipv4;
2037 struct ipv6hdr *ipv6;
2038 } hdr;
2039 struct tcphdr *th;
2040 unsigned int hlen;
2041 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002042 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002043 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002044
2045 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002046 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002047 return;
2048
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002049 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2050 return;
2051
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002052 /* if sampling is disabled do nothing */
2053 if (!tx_ring->atr_sample_rate)
2054 return;
2055
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002056 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002057 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002058 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002059
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002060 /* snag network header to get L4 type and address */
2061 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2062 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002063
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002064 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002065 * tx_enable_csum function if encap is enabled.
2066 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002067 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2068 /* access ihl as u8 to avoid unaligned access on ia64 */
2069 hlen = (hdr.network[0] & 0x0F) << 2;
2070 l4_proto = hdr.ipv4->protocol;
2071 } else {
2072 hlen = hdr.network - skb->data;
2073 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2074 hlen -= hdr.network - skb->data;
2075 }
2076
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002077 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002078 return;
2079
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002080 th = (struct tcphdr *)(hdr.network + hlen);
2081
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002082 /* Due to lack of space, no more new filters can be programmed */
2083 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2084 return;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002085 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2086 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002087 /* HW ATR eviction will take care of removing filters on FIN
2088 * and RST packets.
2089 */
2090 if (th->fin || th->rst)
2091 return;
2092 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002093
2094 tx_ring->atr_count++;
2095
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002096 /* sample on all syn/fin/rst packets or once every atr sample rate */
2097 if (!th->fin &&
2098 !th->syn &&
2099 !th->rst &&
2100 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002101 return;
2102
2103 tx_ring->atr_count = 0;
2104
2105 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002106 i = tx_ring->next_to_use;
2107 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2108
2109 i++;
2110 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002111
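	/* encode the Tx queue index and the IPv4/IPv6 TCP PCTYPE into QW0 */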
2112 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2113 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002114 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002115 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2116 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2117 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2118 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2119
2120 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2121
2122 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2123
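	/* FIN/RST ends the flow, so remove the filter; otherwise add/update it */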
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002124 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002125 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2126 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2127 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2128 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2129
2130 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2131 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2132
2133 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2134 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2135
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002136 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002137 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002138 dtype_cmd |=
2139 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2140 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2141 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2142 else
2143 dtype_cmd |=
2144 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2145 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2146 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002147
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002148 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2149 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002150 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2151
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002152 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002153 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002154 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002155 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002156}
2157
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002158/**
2159 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2160 * @skb: send buffer
2161 * @tx_ring: ring to send buffer on
2162 * @flags: the tx flags to be set
2163 *
2164 * Checks the skb and set up correspondingly several generic transmit flags
2165 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2166 *
 2167 * Returns an error code to indicate the frame should be dropped upon error,
 2168 * otherwise returns 0 to indicate the flags have been set properly.
2169 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002170#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002171inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002172 struct i40e_ring *tx_ring,
2173 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002174#else
2175static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2176 struct i40e_ring *tx_ring,
2177 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002178#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002179{
2180 __be16 protocol = skb->protocol;
2181 u32 tx_flags = 0;
2182
Greg Rose31eaacc2015-03-31 00:45:03 -07002183 if (protocol == htons(ETH_P_8021Q) &&
2184 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2185 /* When HW VLAN acceleration is turned off by the user the
2186 * stack sets the protocol to 8021q so that the driver
2187 * can take any steps required to support the SW only
2188 * VLAN handling. In our case the driver doesn't need
2189 * to take any further steps so just set the protocol
2190 * to the encapsulated ethertype.
2191 */
2192 skb->protocol = vlan_get_protocol(skb);
2193 goto out;
2194 }
2195
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002196 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002197 if (skb_vlan_tag_present(skb)) {
2198 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002199 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2200 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002201 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002202 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002203
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002204 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2205 if (!vhdr)
2206 return -EINVAL;
2207
2208 protocol = vhdr->h_vlan_encapsulated_proto;
2209 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2210 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2211 }
2212
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002213 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2214 goto out;
2215
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002216 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002217 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2218 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002219 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2220 tx_flags |= (skb->priority & 0x7) <<
2221 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2222 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2223 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002224 int rc;
2225
2226 rc = skb_cow_head(skb, 0);
2227 if (rc < 0)
2228 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002229 vhdr = (struct vlan_ethhdr *)skb->data;
2230 vhdr->h_vlan_TCI = htons(tx_flags >>
2231 I40E_TX_FLAGS_VLAN_SHIFT);
2232 } else {
2233 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2234 }
2235 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002236
2237out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002238 *flags = tx_flags;
2239 return 0;
2240}
2241
2242/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002243 * i40e_tso - set up the tso context descriptor
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002244 * @skb: ptr to the skb we're sending
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002245 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002246 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002247 *
 2248 * Returns 0 if no TSO can happen, 1 if TSO is happening, or a negative error
2249 **/
Jesse Brandeburg84b07992016-04-01 03:56:05 -07002250static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002251{
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002252 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002253 union {
2254 struct iphdr *v4;
2255 struct ipv6hdr *v6;
2256 unsigned char *hdr;
2257 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002258 union {
2259 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002260 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002261 unsigned char *hdr;
2262 } l4;
2263 u32 paylen, l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002264 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002265
Shannon Nelsone9f65632016-01-04 10:33:04 -08002266 if (skb->ip_summed != CHECKSUM_PARTIAL)
2267 return 0;
2268
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002269 if (!skb_is_gso(skb))
2270 return 0;
2271
Francois Romieudd225bc2014-03-30 03:14:48 +00002272 err = skb_cow_head(skb, 0);
2273 if (err < 0)
2274 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002275
Alexander Duyckc7770192016-01-24 21:16:35 -08002276 ip.hdr = skb_network_header(skb);
2277 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002278
Alexander Duyckc7770192016-01-24 21:16:35 -08002279 /* initialize outer IP header fields */
2280 if (ip.v4->version == 4) {
2281 ip.v4->tot_len = 0;
2282 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002283 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002284 ip.v6->payload_len = 0;
2285 }
2286
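	/* for tunneled TSO, adjust the outer UDP header when required and then
	 * switch the header pointers to the inner headers
	 */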
Alexander Duyck577389a2016-04-02 00:06:56 -07002287 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002288 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002289 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002290 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07002291 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002292 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002293 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2294 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2295 l4.udp->len = 0;
2296
Alexander Duyck54532052016-01-24 21:17:29 -08002297 /* determine offset of outer transport header */
2298 l4_offset = l4.hdr - skb->data;
2299
2300 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002301 paylen = skb->len - l4_offset;
2302 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002303 }
2304
Alexander Duyckc7770192016-01-24 21:16:35 -08002305 /* reset pointers to inner headers */
2306 ip.hdr = skb_inner_network_header(skb);
2307 l4.hdr = skb_inner_transport_header(skb);
2308
2309 /* initialize inner IP header fields */
2310 if (ip.v4->version == 4) {
2311 ip.v4->tot_len = 0;
2312 ip.v4->check = 0;
2313 } else {
2314 ip.v6->payload_len = 0;
2315 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002316 }
2317
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002318 /* determine offset of inner transport header */
2319 l4_offset = l4.hdr - skb->data;
2320
2321 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002322 paylen = skb->len - l4_offset;
2323 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002324
2325 /* compute length of segmentation header */
2326 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002327
2328 /* find the field values */
2329 cd_cmd = I40E_TX_CTX_DESC_TSO;
2330 cd_tso_len = skb->len - *hdr_len;
2331 cd_mss = skb_shinfo(skb)->gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002332 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2333 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2334 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002335 return 1;
2336}
2337
2338/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002339 * i40e_tsyn - set up the tsyn context descriptor
2340 * @tx_ring: ptr to the ring to send
2341 * @skb: ptr to the skb we're sending
2342 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002343 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002344 *
2345 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2346 **/
2347static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2348 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2349{
2350 struct i40e_pf *pf;
2351
2352 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2353 return 0;
2354
2355 /* Tx timestamps cannot be sampled when doing TSO */
2356 if (tx_flags & I40E_TX_FLAGS_TSO)
2357 return 0;
2358
2359 /* only timestamp the outbound packet if the user has requested it and
2360 * we are not already transmitting a packet to be timestamped
2361 */
2362 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002363 if (!(pf->flags & I40E_FLAG_PTP))
2364 return 0;
2365
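	/* hold a reference on the skb so it is still available when the
	 * timestamp is later retrieved; __I40E_PTP_TX_IN_PROGRESS serializes
	 * outstanding requests
	 */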
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002366 if (pf->ptp_tx &&
2367 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002368 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2369 pf->ptp_tx_skb = skb_get(skb);
2370 } else {
2371 return 0;
2372 }
2373
2374 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2375 I40E_TXD_CTX_QW1_CMD_SHIFT;
2376
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002377 return 1;
2378}
2379
2380/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002381 * i40e_tx_enable_csum - Enable Tx checksum offloads
2382 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002383 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002384 * @td_cmd: Tx descriptor command bits to set
2385 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06002386 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002387 * @cd_tunneling: ptr to context desc bits
2388 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08002389static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2390 u32 *td_cmd, u32 *td_offset,
2391 struct i40e_ring *tx_ring,
2392 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002393{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002394 union {
2395 struct iphdr *v4;
2396 struct ipv6hdr *v6;
2397 unsigned char *hdr;
2398 } ip;
2399 union {
2400 struct tcphdr *tcp;
2401 struct udphdr *udp;
2402 unsigned char *hdr;
2403 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002404 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002405 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002406 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002407 u8 l4_proto = 0;
2408
Alexander Duyck529f1f62016-01-24 21:17:10 -08002409 if (skb->ip_summed != CHECKSUM_PARTIAL)
2410 return 0;
2411
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002412 ip.hdr = skb_network_header(skb);
2413 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002414
Alexander Duyck475b4202016-01-24 21:17:01 -08002415 /* compute outer L2 header size */
2416 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2417
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002418 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002419 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08002420 /* define outer network header type */
2421 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002422 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2423 I40E_TX_CTX_EXT_IP_IPV4 :
2424 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2425
Alexander Duycka0064722016-01-24 21:16:48 -08002426 l4_proto = ip.v4->protocol;
2427 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002428 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002429
2430 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08002431 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002432 if (l4.hdr != exthdr)
2433 ipv6_skip_exthdr(skb, exthdr - skb->data,
2434 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08002435 }
2436
2437 /* define outer transport */
2438 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002439 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08002440 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002441 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002442 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002443 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08002444 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08002445 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002446 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07002447 case IPPROTO_IPIP:
2448 case IPPROTO_IPV6:
2449 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2450 l4.hdr = skb_inner_network_header(skb);
2451 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002452 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002453 if (*tx_flags & I40E_TX_FLAGS_TSO)
2454 return -1;
2455
2456 skb_checksum_help(skb);
2457 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002458 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002459
Alexander Duyck577389a2016-04-02 00:06:56 -07002460 /* compute outer L3 header size */
2461 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2462 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2463
2464 /* switch IP header pointer from outer to inner header */
2465 ip.hdr = skb_inner_network_header(skb);
2466
Alexander Duyck475b4202016-01-24 21:17:01 -08002467 /* compute tunnel header size */
2468 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2469 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2470
Alexander Duyck54532052016-01-24 21:17:29 -08002471 /* indicate if we need to offload outer UDP header */
2472 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002473 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08002474 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2475 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2476
Alexander Duyck475b4202016-01-24 21:17:01 -08002477 /* record tunnel offload values */
2478 *cd_tunneling |= tunnel;
2479
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002480 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002481 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08002482 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002483
Alexander Duycka0064722016-01-24 21:16:48 -08002484 /* reset type as we transition from outer to inner headers */
2485 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2486 if (ip.v4->version == 4)
2487 *tx_flags |= I40E_TX_FLAGS_IPV4;
2488 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002489 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002490 }
2491
2492 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002493 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002494 l4_proto = ip.v4->protocol;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002495 /* the stack computes the IP header checksum already, the only
 2496 * time we need the hardware to recompute it is in the case of TSO.
2497 */
Alexander Duyck475b4202016-01-24 21:17:01 -08002498 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2499 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2500 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002501 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002502 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002503
2504 exthdr = ip.hdr + sizeof(*ip.v6);
2505 l4_proto = ip.v6->nexthdr;
2506 if (l4.hdr != exthdr)
2507 ipv6_skip_exthdr(skb, exthdr - skb->data,
2508 &l4_proto, &frag_off);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002509 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002510
Alexander Duyck475b4202016-01-24 21:17:01 -08002511 /* compute inner L3 header size */
2512 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002513
2514 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002515 switch (l4_proto) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002516 case IPPROTO_TCP:
2517 /* enable TCP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002518 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2519 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002520 break;
2521 case IPPROTO_SCTP:
2522 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002523 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2524 offset |= (sizeof(struct sctphdr) >> 2) <<
2525 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002526 break;
2527 case IPPROTO_UDP:
2528 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002529 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2530 offset |= (sizeof(struct udphdr) >> 2) <<
2531 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002532 break;
2533 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002534 if (*tx_flags & I40E_TX_FLAGS_TSO)
2535 return -1;
2536 skb_checksum_help(skb);
2537 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002538 }
Alexander Duyck475b4202016-01-24 21:17:01 -08002539
2540 *td_cmd |= cmd;
2541 *td_offset |= offset;
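	/* Example (illustrative packet): a plain, non-tunneled TCP/IPv4
	 * frame with no IP or TCP options encodes IPLEN = 20 / 4 = 5 and
	 * L4LEN = doff = 5 into *td_offset here; the MACLEN portion is
	 * assumed to have been filled in from the L2 header length earlier
	 * in this function.
	 */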
Alexander Duyck529f1f62016-01-24 21:17:10 -08002542
2543 return 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002544}
2545
2546/**
2547 * i40e_create_tx_ctx - Build the Tx context descriptor
2548 * @tx_ring: ring to create the descriptor on
2549 * @cd_type_cmd_tso_mss: Quad Word 1
2550 * @cd_tunneling: Quad Word 0 - bits 0-31
2551 * @cd_l2tag2: Quad Word 0 - bits 32-63
2552 **/
2553static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2554 const u64 cd_type_cmd_tso_mss,
2555 const u32 cd_tunneling, const u32 cd_l2tag2)
2556{
2557 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002558 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002559
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002560 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2561 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002562 return;
2563
2564 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002565 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2566
2567 i++;
2568 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002569
2570 /* cpu_to_le32 and assign to struct fields */
2571 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2572 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002573 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002574 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2575}
2576
2577/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002578 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2579 * @tx_ring: the ring to be checked
2580 * @size: the size buffer we want to assure is available
2581 *
2582 * Returns -EBUSY if a stop is needed, else 0
2583 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002584int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002585{
2586 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2587 /* Memory barrier before checking head and tail */
2588 smp_mb();
2589
2590 /* Check again in a case another CPU has just made room available. */
2591 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2592 return -EBUSY;
2593
2594 /* A reprieve! - use start_queue because it doesn't call schedule */
2595 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2596 ++tx_ring->tx_stats.restart_queue;
2597 return 0;
2598}
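/* A minimal sketch of the expected fast-path caller (an assumption about
 * how the i40e_maybe_stop_tx() wrapper used below is laid out): the hot
 * path checks the free-descriptor count first and only drops into the
 * slow path above when the ring looks full, e.g.
 *
 *	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
 *		return 0;
 *	return __i40e_maybe_stop_tx(tx_ring, size);
 */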
2599
2600/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002601 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00002602 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00002603 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002604 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2605 * and so we need to figure out the cases where we need to linearize the skb.
2606 *
2607 * For TSO we need to count the TSO header and segment payload separately.
2608 * As such we need to check cases where we have 7 fragments or more as we
2609 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2610 * the segment payload in the first descriptor, and another 7 for the
2611 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00002612 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08002613bool __i40e_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00002614{
Alexander Duyck2d374902016-02-17 11:02:50 -08002615 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002616 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00002617
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002618 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08002619 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002620 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08002621 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002622
Alexander Duyck2d374902016-02-17 11:02:50 -08002623 /* We need to walk through the list and validate that each group
Alexander Duyck841493a2016-09-06 18:05:04 -07002624 * of 6 fragments totals at least gso_size.
Alexander Duyck2d374902016-02-17 11:02:50 -08002625 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002626 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08002627 frag = &skb_shinfo(skb)->frags[0];
2628
2629 /* Initialize size to the negative value of gso_size minus 1. We
2630 * use this as the worst case scenario in which the frag ahead
2631 * of us only provides one byte which is why we are limited to 6
2632 * descriptors for a single transmit as the header and previous
2633 * fragment are already consuming 2 descriptors.
2634 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002635 sum = 1 - skb_shinfo(skb)->gso_size;
Alexander Duyck2d374902016-02-17 11:02:50 -08002636
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002637 /* Add size of frags 0 through 4 to create our initial sum */
2638 sum += skb_frag_size(frag++);
2639 sum += skb_frag_size(frag++);
2640 sum += skb_frag_size(frag++);
2641 sum += skb_frag_size(frag++);
2642 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002643
2644 /* Walk through fragments adding latest fragment, testing it, and
2645 * then removing stale fragments from the sum.
2646 */
2647 stale = &skb_shinfo(skb)->frags[0];
2648 for (;;) {
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002649 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002650
2651 /* if sum is negative we failed to make sufficient progress */
2652 if (sum < 0)
2653 return true;
2654
Alexander Duyck841493a2016-09-06 18:05:04 -07002655 if (!nr_frags--)
Alexander Duyck2d374902016-02-17 11:02:50 -08002656 break;
2657
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002658 sum -= skb_frag_size(stale++);
Anjali Singhai71da6192015-02-21 06:42:35 +00002659 }
2660
Alexander Duyck2d374902016-02-17 11:02:50 -08002661 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002662}
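/* Worked example (illustrative numbers): an skb with gso_size = 7000 and
 * eight 1000 byte fragments starts with sum = 1 - 7000 = -6999, the first
 * five fragments bring it to -1999, and adding fragment 5 in the loop only
 * reaches -999, so sum stays negative and the function returns true (the
 * skb must be linearized).  With gso_size = 5000 the same layout reaches
 * +1 before the loop and never goes negative, so it returns false.
 */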
2663
2664/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002665 * i40e_tx_map - Build the Tx descriptor
2666 * @tx_ring: ring to send buffer on
2667 * @skb: send buffer
2668 * @first: first buffer info buffer to use
2669 * @tx_flags: collected send information
2670 * @hdr_len: size of the packet header
2671 * @td_cmd: the command field in the descriptor
2672 * @td_offset: offset for checksum or crc
2673 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002674#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002675inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002676 struct i40e_tx_buffer *first, u32 tx_flags,
2677 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002678#else
2679static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2680 struct i40e_tx_buffer *first, u32 tx_flags,
2681 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002682#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002683{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002684 unsigned int data_len = skb->data_len;
2685 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002686 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002687 struct i40e_tx_buffer *tx_bi;
2688 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002689 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002690 u32 td_tag = 0;
2691 dma_addr_t dma;
2692 u16 gso_segs;
Anjali Singhai58044742015-09-25 18:26:13 -07002693 u16 desc_count = 0;
2694 bool tail_bump = true;
2695 bool do_rs = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002696
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002697 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2698 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2699 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2700 I40E_TX_FLAGS_VLAN_SHIFT;
2701 }
2702
Alexander Duycka5e9c572013-09-28 06:00:27 +00002703 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2704 gso_segs = skb_shinfo(skb)->gso_segs;
2705 else
2706 gso_segs = 1;
2707
2708 /* multiply data chunks by size of headers */
2709 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2710 first->gso_segs = gso_segs;
2711 first->skb = skb;
2712 first->tx_flags = tx_flags;
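	/* Example (illustrative numbers): for a TSO skb with skb->len = 2962,
	 * hdr_len = 54 and gso_segs = 3, each segment carries its own copy of
	 * the headers on the wire, so bytecount = 2962 - 54 + 3 * 54 = 3070.
	 */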
2713
2714 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2715
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002716 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002717 tx_bi = first;
2718
2719 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002720 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2721
Alexander Duycka5e9c572013-09-28 06:00:27 +00002722 if (dma_mapping_error(tx_ring->dev, dma))
2723 goto dma_error;
2724
2725 /* record length, and DMA address */
2726 dma_unmap_len_set(tx_bi, len, size);
2727 dma_unmap_addr_set(tx_bi, dma, dma);
2728
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002729 /* align size to end of page */
2730 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
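		/* Example, assuming I40E_MAX_READ_REQ_SIZE is 4096: if dma
		 * starts 0x300 bytes into a 4 KiB block, -dma & 4095 adds
		 * 0xD00 (3328) bytes so this first chunk ends exactly on a
		 * 4 KiB boundary; later chunks then start aligned and use
		 * I40E_MAX_DATA_PER_TXD_ALIGNED directly.
		 */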
Alexander Duycka5e9c572013-09-28 06:00:27 +00002731 tx_desc->buffer_addr = cpu_to_le64(dma);
2732
2733 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002734 tx_desc->cmd_type_offset_bsz =
2735 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002736 max_data, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002737
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002738 tx_desc++;
2739 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002740 desc_count++;
2741
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002742 if (i == tx_ring->count) {
2743 tx_desc = I40E_TX_DESC(tx_ring, 0);
2744 i = 0;
2745 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002746
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002747 dma += max_data;
2748 size -= max_data;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002749
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002750 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002751 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002752 }
2753
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002754 if (likely(!data_len))
2755 break;
2756
Alexander Duycka5e9c572013-09-28 06:00:27 +00002757 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2758 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002759
2760 tx_desc++;
2761 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002762 desc_count++;
2763
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002764 if (i == tx_ring->count) {
2765 tx_desc = I40E_TX_DESC(tx_ring, 0);
2766 i = 0;
2767 }
2768
Alexander Duycka5e9c572013-09-28 06:00:27 +00002769 size = skb_frag_size(frag);
2770 data_len -= size;
2771
2772 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2773 DMA_TO_DEVICE);
2774
2775 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002776 }
2777
Alexander Duycka5e9c572013-09-28 06:00:27 +00002778 /* set next_to_watch value indicating a packet is present */
2779 first->next_to_watch = tx_desc;
2780
2781 i++;
2782 if (i == tx_ring->count)
2783 i = 0;
2784
2785 tx_ring->next_to_use = i;
2786
Anjali Singhai58044742015-09-25 18:26:13 -07002787 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2788 tx_ring->queue_index),
2789 first->bytecount);
Eric Dumazet4567dc12014-10-07 13:30:23 -07002790 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07002791
2792 /* Algorithm to optimize tail and RS bit setting:
2793 * if xmit_more is supported
2794 * if xmit_more is true
2795 * do not update tail and do not mark RS bit.
2796 * if xmit_more is false and last xmit_more was false
2797 * if every packet spanned less than 4 desc
2798 * then set RS bit on 4th packet and update tail
2799 * on every packet
2800 * else
2801 * update tail and set RS bit on every packet.
2802 * if xmit_more is false and last_xmit_more was true
2803 * update tail and set RS bit.
2804 *
2805 * Optimization: wmb to be issued only in case of tail update.
2806 * Also optimize the Descriptor WB path for RS bit with the same
2807 * algorithm.
2808 *
2809 * Note: If there are less than 4 packets
2810 * pending and interrupts were disabled the service task will
2811 * trigger a force WB.
2812 */
2813 if (skb->xmit_more &&
2814 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2815 tx_ring->queue_index))) {
2816 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2817 tail_bump = false;
2818 } else if (!skb->xmit_more &&
2819 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2820 tx_ring->queue_index)) &&
2821 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2822 (tx_ring->packet_stride < WB_STRIDE) &&
2823 (desc_count < WB_STRIDE)) {
2824 tx_ring->packet_stride++;
2825 } else {
2826 tx_ring->packet_stride = 0;
2827 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2828 do_rs = true;
2829 }
2830 if (do_rs)
2831 tx_ring->packet_stride = 0;
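	/* Net effect of the checks above: xmit_more with a live queue defers
	 * the tail write entirely; a short run of small packets skips the RS
	 * bit and only advances packet_stride; every other case bumps the
	 * tail and requests a descriptor write-back via do_rs.
	 */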
2832
2833 tx_desc->cmd_type_offset_bsz =
2834 build_ctob(td_cmd, td_offset, size, td_tag) |
2835 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2836 I40E_TX_DESC_CMD_EOP) <<
2837 I40E_TXD_QW1_CMD_SHIFT);
2838
Alexander Duycka5e9c572013-09-28 06:00:27 +00002839 /* notify HW of packet */
Carolyn Wybornyffeac832016-08-04 11:37:03 -07002840 if (!tail_bump) {
Jesse Brandeburg489ce7a2015-04-27 14:57:08 -04002841 prefetchw(tx_desc + 1);
Carolyn Wybornyffeac832016-08-04 11:37:03 -07002842 } else {
Anjali Singhai58044742015-09-25 18:26:13 -07002843 /* Force memory writes to complete before letting h/w
2844 * know there are new descriptors to fetch. (Only
2845 * applicable for weak-ordered memory model archs,
2846 * such as IA-64).
2847 */
2848 wmb();
2849 writel(i, tx_ring->tail);
2850 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002851 return;
2852
2853dma_error:
Alexander Duycka5e9c572013-09-28 06:00:27 +00002854 dev_info(tx_ring->dev, "TX DMA map failed\n");
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002855
2856 /* clear dma mappings for failed tx_bi map */
2857 for (;;) {
2858 tx_bi = &tx_ring->tx_bi[i];
Alexander Duycka5e9c572013-09-28 06:00:27 +00002859 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002860 if (tx_bi == first)
2861 break;
2862 if (i == 0)
2863 i = tx_ring->count;
2864 i--;
2865 }
2866
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002867 tx_ring->next_to_use = i;
2868}
2869
2870/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002871 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2872 * @skb: send buffer
2873 * @tx_ring: ring to send buffer on
2874 *
2875 * Returns NETDEV_TX_OK if sent, else an error code
2876 **/
2877static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2878 struct i40e_ring *tx_ring)
2879{
2880 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2881 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2882 struct i40e_tx_buffer *first;
2883 u32 td_offset = 0;
2884 u32 tx_flags = 0;
2885 __be16 protocol;
2886 u32 td_cmd = 0;
2887 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002888 int tso, count;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002889 int tsyn;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002890
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04002891 /* prefetch the data; we'll need it later */
2892 prefetch(skb->data);
2893
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002894 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08002895 if (i40e_chk_linearize(skb, count)) {
2896 if (__skb_linearize(skb))
2897 goto out_drop;
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002898 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08002899 tx_ring->tx_stats.tx_linearize++;
2900 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002901
2902 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2903 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2904 * + 4 desc gap to avoid the cache line where head is,
2905 * + 1 desc for context descriptor,
2906 * otherwise try next time
2907 */
2908 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2909 tx_ring->tx_stats.tx_busy++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002910 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002911 }
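	/* Example (illustrative numbers): a non-TSO skb with a linear head
	 * and three page fragments typically needs count = 4 data
	 * descriptors, so the ring must have at least 4 + 4 + 1 = 9 free
	 * descriptors before we continue.
	 */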
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002912
2913 /* prepare the xmit flags */
2914 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2915 goto out_drop;
2916
2917 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04002918 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002919
2920 /* record the location of the first descriptor for this packet */
2921 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2922
2923 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002924 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002925 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002926 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002927 tx_flags |= I40E_TX_FLAGS_IPV6;
2928
Jesse Brandeburg84b07992016-04-01 03:56:05 -07002929 tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002930
2931 if (tso < 0)
2932 goto out_drop;
2933 else if (tso)
2934 tx_flags |= I40E_TX_FLAGS_TSO;
2935
Alexander Duyck3bc67972016-02-17 11:02:56 -08002936 /* Always offload the checksum, since it's in the data descriptor */
2937 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2938 tx_ring, &cd_tunneling);
2939 if (tso < 0)
2940 goto out_drop;
2941
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002942 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2943
2944 if (tsyn)
2945 tx_flags |= I40E_TX_FLAGS_TSYN;
2946
Jakub Kicinski259afec2014-03-15 14:55:37 +00002947 skb_tx_timestamp(skb);
2948
Alexander Duyckb1941302013-09-28 06:00:32 +00002949 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002950 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2951
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002952 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2953 cd_tunneling, cd_l2tag2);
2954
2955 /* Add Flow Director ATR if it's enabled.
2956 *
2957 * NOTE: this must always be directly before the data descriptor.
2958 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002959 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002960
2961 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2962 td_cmd, td_offset);
2963
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002964 return NETDEV_TX_OK;
2965
2966out_drop:
2967 dev_kfree_skb_any(skb);
2968 return NETDEV_TX_OK;
2969}
2970
2971/**
2972 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2973 * @skb: send buffer
2974 * @netdev: network interface device structure
2975 *
2976 * Returns NETDEV_TX_OK if sent, else an error code
2977 **/
2978netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2979{
2980 struct i40e_netdev_priv *np = netdev_priv(netdev);
2981 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e15b2013-09-28 06:00:58 +00002982 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002983
2984 /* hardware can't handle really short frames, hardware padding works
2985 * beyond this point
2986 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08002987 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2988 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002989
2990 return i40e_xmit_frame_ring(skb, tx_ring);
2991}