/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
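
/* Example (illustrative only, values assumed): a 60-byte data buffer with
 * EOP and RS set and no checksum/TSO offsets would be encoded as
 *
 *	build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS, 0, 60, 0);
 *
 * packing DTYPE_DATA, the command bits, the header offsets, the buffer size
 * and the L2 tag into the single little-endian descriptor qword.
 */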

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
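
/* Sketch of a typical caller (a minimal illustration distilled from the
 * helpers below, not a verbatim copy): the buffer must be kzalloc'd to
 * I40E_FDIR_MAX_RAW_PACKET_SIZE and, on success, ownership passes to the
 * ring; it is freed later by i40e_clean_tx_ring() once the dummy
 * descriptor completes:
 *
 *	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 *	if (!raw_packet)
 *		return -ENOMEM;
 *	// build the dummy frame and set fd_data->pctype here
 *	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 *	if (ret)
 *		kfree(raw_packet);
 */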

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
				+ sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
				+ sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * SCTPv4 filters are not supported; always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter spec to add or remove
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
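
/* Caller's-eye sketch (hypothetical ethtool invocation, device name and
 * values assumed): a request such as
 *
 *	ethtool -N eth0 flow-type udp4 dst-port 319 action 2
 *
 * reaches this function with input->flow_type == UDP_V4_FLOW and is turned
 * by the helpers above into a dummy UDP/IPv4 frame plus a Flow Director
 * programming descriptor on the FDIR VSI's Tx ring.
 */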

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free all Tx ring buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
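
/* Example: on a ring with count = 512, head = 500 and tail = 10 means the
 * ring has wrapped, so tail + count - head = 10 + 512 - 500 = 22
 * descriptors are still pending.
 */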

#define WB_STRIDE 0x3

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
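
/* Note: unlike drivers that poll per-descriptor DD bits, completion here is
 * detected by comparing tx_desc against the head write-back location (see
 * i40e_get_head() and the extra u32 appended to the ring in
 * i40e_setup_tx_descriptors() below).
 */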

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
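
/* Worked example (assuming ITR_COUNTDOWN_START is 100 and I40E_ITR_20K is
 * 25, i.e. a 50 usec interval in the 2-usec register units; both constants
 * live outside this file): usecs works out to 50 * 100 = 5000.  If ~2 MB
 * arrived in that window, bytes_per_int = 2000000 / 5000 = 400 > 20, so a
 * ring sitting in I40E_LOW_LATENCY steps up to I40E_BULK_LATENCY and the
 * ITR moves to I40E_ITR_18K on this update.
 */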

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
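
/* Sizing example (assuming the 16-byte struct i40e_tx_desc): a 512-entry
 * ring needs 512 * 16 + 4 = 8196 bytes including the head write-back word,
 * which ALIGN(, 4096) rounds up to 12288 bytes of coherent DMA memory.
 */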

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}
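
/* Layout example (rx_hdr_len value assumed): with rx_hdr_len = 96,
 * ALIGN(96, 256) gives buf_size = 256, so a 512-entry ring gets one 128 KB
 * coherent block carved into per-descriptor slices at dma + i * 256 and
 * buffer + i * 256.
 */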

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
1199
1200/**
Mitch Williamsa132af22015-01-24 09:58:35 +00001201 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001202 * @rx_ring: ring to place buffers on
1203 * @cleaned_count: number of buffers to replace
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001204 *
1205 * Returns true if any errors on allocation
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001206 **/
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001207bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
Mitch Williamsa132af22015-01-24 09:58:35 +00001208{
1209 u16 i = rx_ring->next_to_use;
1210 union i40e_rx_desc *rx_desc;
1211 struct i40e_rx_buffer *bi;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001212 const int current_node = numa_node_id();
Mitch Williamsa132af22015-01-24 09:58:35 +00001213
1214 /* do nothing if no valid netdev defined */
1215 if (!rx_ring->netdev || !cleaned_count)
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001216 return false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001217
1218 while (cleaned_count--) {
1219 rx_desc = I40E_RX_DESC(rx_ring, i);
1220 bi = &rx_ring->rx_bi[i];
1221
1222 if (bi->skb) /* desc is in use */
1223 goto no_buffers;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001224
1225 /* If we've been moved to a different NUMA node, release the
1226 * page so we can get a new one on the current node.
1227 */
1228 if (bi->page && page_to_nid(bi->page) != current_node) {
1229 dma_unmap_page(rx_ring->dev,
1230 bi->page_dma,
1231 PAGE_SIZE,
1232 DMA_FROM_DEVICE);
1233 __free_page(bi->page);
1234 bi->page = NULL;
1235 bi->page_dma = 0;
1236 rx_ring->rx_stats.realloc_count++;
1237 } else if (bi->page) {
1238 rx_ring->rx_stats.page_reuse_count++;
1239 }
1240
Mitch Williamsa132af22015-01-24 09:58:35 +00001241 if (!bi->page) {
1242 bi->page = alloc_page(GFP_ATOMIC);
1243 if (!bi->page) {
1244 rx_ring->rx_stats.alloc_page_failed++;
1245 goto no_buffers;
1246 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001247 bi->page_dma = dma_map_page(rx_ring->dev,
1248 bi->page,
Mitch Williamsf16704e2016-01-13 16:51:49 -08001249 0,
1250 PAGE_SIZE,
Mitch Williamsa132af22015-01-24 09:58:35 +00001251 DMA_FROM_DEVICE);
Mitch Williamsf16704e2016-01-13 16:51:49 -08001252 if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001253 rx_ring->rx_stats.alloc_page_failed++;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001254 __free_page(bi->page);
1255 bi->page = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001256 bi->page_dma = 0;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001257 bi->page_offset = 0;
Mitch Williamsa132af22015-01-24 09:58:35 +00001258 goto no_buffers;
1259 }
Mitch Williamsf16704e2016-01-13 16:51:49 -08001260 bi->page_offset = 0;
Mitch Williamsa132af22015-01-24 09:58:35 +00001261 }
1262
Mitch Williamsa132af22015-01-24 09:58:35 +00001263 /* Refresh the desc even if buffer_addrs didn't change
1264 * because each write-back erases this info.
1265 */
Mitch Williamsf16704e2016-01-13 16:51:49 -08001266 rx_desc->read.pkt_addr =
1267 cpu_to_le64(bi->page_dma + bi->page_offset);
Mitch Williamsa132af22015-01-24 09:58:35 +00001268 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1269 i++;
1270 if (i == rx_ring->count)
1271 i = 0;
1272 }
1273
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001274 if (rx_ring->next_to_use != i)
1275 i40e_release_rx_desc(rx_ring, i);
1276
1277 return false;
1278
Mitch Williamsa132af22015-01-24 09:58:35 +00001279no_buffers:
1280 if (rx_ring->next_to_use != i)
1281 i40e_release_rx_desc(rx_ring, i);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001282
1283 /* make sure to come back via polling to try again after
1284 * allocation failure
1285 */
1286 return true;
Mitch Williamsa132af22015-01-24 09:58:35 +00001287}
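/* Page-reuse sketch (illustrative): each page above is handed out one half
 * at a time.  The clean path flips bi->page_offset between 0 and
 * PAGE_SIZE / 2 and takes an extra reference while the stack still owns the
 * other half; once page_count() shows both halves in flight, the page is
 * unmapped and forgotten, and the refill loop above allocates a fresh one
 * because bi->page is NULL.
 */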
1288
1289/**
1290 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1291 * @rx_ring: ring to place buffers on
1292 * @cleaned_count: number of buffers to replace
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001293 *
 1294 * Returns true if any errors occurred during allocation
Mitch Williamsa132af22015-01-24 09:58:35 +00001295 **/
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001296bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001297{
1298 u16 i = rx_ring->next_to_use;
1299 union i40e_rx_desc *rx_desc;
1300 struct i40e_rx_buffer *bi;
1301 struct sk_buff *skb;
1302
1303 /* do nothing if no valid netdev defined */
1304 if (!rx_ring->netdev || !cleaned_count)
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001305 return false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001306
1307 while (cleaned_count--) {
1308 rx_desc = I40E_RX_DESC(rx_ring, i);
1309 bi = &rx_ring->rx_bi[i];
1310 skb = bi->skb;
1311
1312 if (!skb) {
Jesse Brandeburgdd1a5df2016-01-13 16:51:48 -08001313 skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
1314 rx_ring->rx_buf_len,
1315 GFP_ATOMIC |
1316 __GFP_NOWARN);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001317 if (!skb) {
Mitch Williams420136c2013-12-18 13:45:59 +00001318 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001319 goto no_buffers;
1320 }
1321 /* initialize queue mapping */
1322 skb_record_rx_queue(skb, rx_ring->queue_index);
1323 bi->skb = skb;
1324 }
1325
1326 if (!bi->dma) {
1327 bi->dma = dma_map_single(rx_ring->dev,
1328 skb->data,
1329 rx_ring->rx_buf_len,
1330 DMA_FROM_DEVICE);
1331 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
Mitch Williams420136c2013-12-18 13:45:59 +00001332 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001333 bi->dma = 0;
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001334 dev_kfree_skb(bi->skb);
1335 bi->skb = NULL;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001336 goto no_buffers;
1337 }
1338 }
1339
Mitch Williamsa132af22015-01-24 09:58:35 +00001340 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1341 rx_desc->read.hdr_addr = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001342 i++;
1343 if (i == rx_ring->count)
1344 i = 0;
1345 }
1346
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001347 if (rx_ring->next_to_use != i)
1348 i40e_release_rx_desc(rx_ring, i);
1349
1350 return false;
1351
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001352no_buffers:
1353 if (rx_ring->next_to_use != i)
1354 i40e_release_rx_desc(rx_ring, i);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001355
1356 /* make sure to come back via polling to try again after
1357 * allocation failure
1358 */
1359 return true;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001360}
1361
1362/**
1363 * i40e_receive_skb - Send a completed packet up the stack
1364 * @rx_ring: rx ring in play
1365 * @skb: packet to send up
1366 * @vlan_tag: vlan tag for packet
1367 **/
1368static void i40e_receive_skb(struct i40e_ring *rx_ring,
1369 struct sk_buff *skb, u16 vlan_tag)
1370{
1371 struct i40e_q_vector *q_vector = rx_ring->q_vector;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001372
1373 if (vlan_tag & VLAN_VID_MASK)
1374 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1375
Alexander Duyck8b650352015-09-24 09:04:32 -07001376 napi_gro_receive(&q_vector->napi, skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001377}
1378
1379/**
1380 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1381 * @vsi: the VSI we care about
1382 * @skb: skb currently being received and modified
1383 * @rx_status: status value of last descriptor in packet
1384 * @rx_error: error value of last descriptor in packet
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001385 * @rx_ptype: ptype value of last descriptor in packet
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001386 **/
1387static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1388 struct sk_buff *skb,
1389 u32 rx_status,
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001390 u32 rx_error,
1391 u16 rx_ptype)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001392{
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001393 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
Alexander Duyckfad57332016-01-24 21:17:22 -08001394 bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001395
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001396 skb->ip_summed = CHECKSUM_NONE;
1397
1398 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001399 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001400 return;
1401
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001402 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001403 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001404 return;
1405
1406 /* both known and outer_ip must be set for the below code to work */
1407 if (!(decoded.known && decoded.outer_ip))
1408 return;
1409
Alexander Duyckfad57332016-01-24 21:17:22 -08001410 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1411 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1412 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1413 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001414
1415 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001416 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1417 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001418 goto checksum_fail;
1419
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001420 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001421 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001422 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001423 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001424 return;
1425
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001426 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001427 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001428 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001429
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001430 /* handle packets that were not able to be checksummed due
1431 * to arrival speed, in this case the stack can compute
1432 * the csum.
1433 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001434 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001435 return;
1436
Alexander Duycka9c9a812016-01-24 21:16:13 -08001437	/* The hardware supported by this driver does not validate outer
 1438	 * checksums for tunneled VXLAN or GENEVE frames. I don't agree with
 1439	 * it, but the specification only states that you "MAY validate" them;
 1440	 * that is not a hard requirement, so if we have validated the inner
 1441	 * checksum, report CHECKSUM_UNNECESSARY.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001442 */
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001443
Alexander Duyckfad57332016-01-24 21:17:22 -08001444 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1445 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1446 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1447 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1448
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001449 skb->ip_summed = CHECKSUM_UNNECESSARY;
Tom Herbertfa4ba692014-08-27 21:27:32 -07001450 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
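	/* Example (illustrative): a VXLAN-encapsulated TCP/IPv4 frame whose
	 * inner checksum passed arrives with L3L4P set and no IPE/EIPE/L4E
	 * error bits, so it reaches this point and leaves with
	 * skb->ip_summed == CHECKSUM_UNNECESSARY and skb->csum_level == 1,
	 * telling the stack that one (outer) checksum level went unverified.
	 */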
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001451
1452 return;
1453
1454checksum_fail:
1455 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001456}
1457
1458/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001459 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001460 * @ptype: the ptype value from the descriptor
1461 *
1462 * Returns a hash type to be used by skb_set_hash
1463 **/
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001464static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001465{
1466 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1467
1468 if (!decoded.known)
1469 return PKT_HASH_TYPE_NONE;
1470
1471 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1472 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1473 return PKT_HASH_TYPE_L4;
1474 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1475 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1476 return PKT_HASH_TYPE_L3;
1477 else
1478 return PKT_HASH_TYPE_L2;
1479}
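/* Example (illustrative): the decoded ptype for TCP over IPv4 reports
 * payload layer PAY4, so the stack is given PKT_HASH_TYPE_L4 and can trust
 * the hash for flow steering; an IP fragment only decodes to PAY3 and is
 * reported as an L3 hash.
 */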
1480
1481/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001482 * i40e_rx_hash - set the hash value in the skb
1483 * @ring: descriptor ring
1484 * @rx_desc: specific descriptor
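 * @skb: skb currently being received and modified
 * @rx_ptype: the ptype value decoded from the descriptor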
1485 **/
1486static inline void i40e_rx_hash(struct i40e_ring *ring,
1487 union i40e_rx_desc *rx_desc,
1488 struct sk_buff *skb,
1489 u8 rx_ptype)
1490{
1491 u32 hash;
1492 const __le64 rss_mask =
1493 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1494 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1495
 1496	if (!(ring->netdev->features & NETIF_F_RXHASH))
1497 return;
1498
1499 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1500 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1501 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1502 }
1503}
1504
1505/**
Mitch Williamsa132af22015-01-24 09:58:35 +00001506 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001507 * @rx_ring: rx ring to clean
1508 * @budget: how many cleans we're allowed
1509 *
 1510 * Returns the number of packets cleaned, or the full budget on allocation failure
1511 **/
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001512static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001513{
1514 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1515 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1516 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001517 struct i40e_vsi *vsi = rx_ring->vsi;
1518 u16 i = rx_ring->next_to_clean;
1519 union i40e_rx_desc *rx_desc;
1520 u32 rx_error, rx_status;
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001521 bool failure = false;
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001522 u8 rx_ptype;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001523 u64 qword;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001524 u32 copysize;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001525
Eric W. Biederman390f86d2014-03-14 17:59:10 -07001526 if (budget <= 0)
1527 return 0;
1528
Mitch Williamsa132af22015-01-24 09:58:35 +00001529 do {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001530 struct i40e_rx_buffer *rx_bi;
1531 struct sk_buff *skb;
1532 u16 vlan_tag;
Mitch Williamsa132af22015-01-24 09:58:35 +00001533 /* return some buffers to hardware, one at a time is too slow */
1534 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001535 failure = failure ||
1536 i40e_alloc_rx_buffers_ps(rx_ring,
1537 cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001538 cleaned_count = 0;
1539 }
1540
1541 i = rx_ring->next_to_clean;
1542 rx_desc = I40E_RX_DESC(rx_ring, i);
1543 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1544 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1545 I40E_RXD_QW1_STATUS_SHIFT;
1546
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001547 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001548 break;
1549
1550 /* This memory barrier is needed to keep us from reading
1551 * any other fields out of the rx_desc until we know the
1552 * DD bit is set.
1553 */
Alexander Duyck67317162015-04-08 18:49:43 -07001554 dma_rmb();
Mitch Williamsf16704e2016-01-13 16:51:49 -08001555 /* sync header buffer for reading */
1556 dma_sync_single_range_for_cpu(rx_ring->dev,
1557 rx_ring->rx_bi[0].dma,
1558 i * rx_ring->rx_hdr_len,
1559 rx_ring->rx_hdr_len,
1560 DMA_FROM_DEVICE);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001561 if (i40e_rx_is_programming_status(qword)) {
1562 i40e_clean_programming_status(rx_ring, rx_desc);
Mitch Williamsa132af22015-01-24 09:58:35 +00001563 I40E_RX_INCREMENT(rx_ring, i);
1564 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001565 }
1566 rx_bi = &rx_ring->rx_bi[i];
1567 skb = rx_bi->skb;
Mitch Williamsa132af22015-01-24 09:58:35 +00001568 if (likely(!skb)) {
Jesse Brandeburgdd1a5df2016-01-13 16:51:48 -08001569 skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
1570 rx_ring->rx_hdr_len,
1571 GFP_ATOMIC |
1572 __GFP_NOWARN);
Jesse Brandeburg8b6ed9c2015-03-31 00:45:01 -07001573 if (!skb) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001574 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001575 failure = true;
Jesse Brandeburg8b6ed9c2015-03-31 00:45:01 -07001576 break;
1577 }
1578
Mitch Williamsa132af22015-01-24 09:58:35 +00001579 /* initialize queue mapping */
1580 skb_record_rx_queue(skb, rx_ring->queue_index);
1581 /* we are reusing so sync this buffer for CPU use */
1582 dma_sync_single_range_for_cpu(rx_ring->dev,
Jesse Brandeburg3578fa02016-01-04 10:33:03 -08001583 rx_ring->rx_bi[0].dma,
1584 i * rx_ring->rx_hdr_len,
Mitch Williamsa132af22015-01-24 09:58:35 +00001585 rx_ring->rx_hdr_len,
1586 DMA_FROM_DEVICE);
1587 }
Mitch Williams829af3a2013-12-18 13:46:00 +00001588 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1589 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1590 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1591 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1592 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1593 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001594
Mitch Williams829af3a2013-12-18 13:46:00 +00001595 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1596 I40E_RXD_QW1_ERROR_SHIFT;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001597 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1598 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001599
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001600 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1601 I40E_RXD_QW1_PTYPE_SHIFT;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001602 /* sync half-page for reading */
1603 dma_sync_single_range_for_cpu(rx_ring->dev,
1604 rx_bi->page_dma,
1605 rx_bi->page_offset,
1606 PAGE_SIZE / 2,
1607 DMA_FROM_DEVICE);
1608 prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001609 rx_bi->skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001610 cleaned_count++;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001611 copysize = 0;
Mitch Williamsa132af22015-01-24 09:58:35 +00001612 if (rx_hbo || rx_sph) {
1613 int len;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04001614
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001615 if (rx_hbo)
1616 len = I40E_RX_HDR_SIZE;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001617 else
Mitch Williamsa132af22015-01-24 09:58:35 +00001618 len = rx_header_len;
1619 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1620 } else if (skb->len == 0) {
1621 int len;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001622 unsigned char *va = page_address(rx_bi->page) +
1623 rx_bi->page_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001624
Mitch Williamsf16704e2016-01-13 16:51:49 -08001625 len = min(rx_packet_len, rx_ring->rx_hdr_len);
1626 memcpy(__skb_put(skb, len), va, len);
1627 copysize = len;
Mitch Williamsa132af22015-01-24 09:58:35 +00001628 rx_packet_len -= len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001629 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001630 /* Get the rest of the data if this was a header split */
Mitch Williamsa132af22015-01-24 09:58:35 +00001631 if (rx_packet_len) {
Mitch Williamsf16704e2016-01-13 16:51:49 -08001632 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1633 rx_bi->page,
1634 rx_bi->page_offset + copysize,
1635 rx_packet_len, I40E_RXBUFFER_2048);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001636
Mitch Williamsf16704e2016-01-13 16:51:49 -08001637 /* If the page count is more than 2, then both halves
1638 * of the page are used and we need to free it. Do it
1639 * here instead of in the alloc code. Otherwise one
1640 * of the half-pages might be released between now and
1641 * then, and we wouldn't know which one to use.
Mitch Williams16fd08b2016-01-15 14:33:15 -08001642 * Don't call get_page and free_page since those are
1643 * both expensive atomic operations that just change
1644 * the refcount in opposite directions. Just give the
 1645	 * page to the stack; it can have our refcount.
Mitch Williamsf16704e2016-01-13 16:51:49 -08001646 */
1647 if (page_count(rx_bi->page) > 2) {
1648 dma_unmap_page(rx_ring->dev,
1649 rx_bi->page_dma,
1650 PAGE_SIZE,
1651 DMA_FROM_DEVICE);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001652 rx_bi->page = NULL;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001653 rx_bi->page_dma = 0;
1654 rx_ring->rx_stats.realloc_count++;
Mitch Williams16fd08b2016-01-15 14:33:15 -08001655 } else {
1656 get_page(rx_bi->page);
1657 /* switch to the other half-page here; the
1658 * allocation code programs the right addr
1659 * into HW. If we haven't used this half-page,
1660 * the address won't be changed, and HW can
1661 * just use it next time through.
1662 */
1663 rx_bi->page_offset ^= PAGE_SIZE / 2;
Mitch Williamsf16704e2016-01-13 16:51:49 -08001664 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001665
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001666 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001667 I40E_RX_INCREMENT(rx_ring, i);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001668
1669 if (unlikely(
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001670 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001671 struct i40e_rx_buffer *next_buffer;
1672
1673 next_buffer = &rx_ring->rx_bi[i];
Mitch Williamsa132af22015-01-24 09:58:35 +00001674 next_buffer->skb = skb;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001675 rx_ring->rx_stats.non_eop_descs++;
Mitch Williamsa132af22015-01-24 09:58:35 +00001676 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001677 }
1678
1679 /* ERR_MASK will only have valid bits if EOP set */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001680 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001681 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001682 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001683 }
1684
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001685 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1686
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00001687 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1688 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1689 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1690 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1691 rx_ring->last_rx_timestamp = jiffies;
1692 }
1693
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001694 /* probably a little skewed due to removing CRC */
1695 total_rx_bytes += skb->len;
1696 total_rx_packets++;
1697
1698 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001699
1700 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1701
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001702 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001703 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1704 : 0;
Vasu Dev38e00432014-08-01 13:27:03 -07001705#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001706 if (unlikely(
1707 i40e_rx_is_fcoe(rx_ptype) &&
1708 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Vasu Dev38e00432014-08-01 13:27:03 -07001709 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001710 continue;
Vasu Dev38e00432014-08-01 13:27:03 -07001711 }
1712#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001713 i40e_receive_skb(rx_ring, skb, vlan_tag);
1714
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001715 rx_desc->wb.qword1.status_error_len = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001716
Mitch Williamsa132af22015-01-24 09:58:35 +00001717 } while (likely(total_rx_packets < budget));
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001718
Alexander Duyck980e9b12013-09-28 06:01:03 +00001719 u64_stats_update_begin(&rx_ring->syncp);
Alexander Duycka114d0a2013-09-28 06:00:43 +00001720 rx_ring->stats.packets += total_rx_packets;
1721 rx_ring->stats.bytes += total_rx_bytes;
Alexander Duyck980e9b12013-09-28 06:01:03 +00001722 u64_stats_update_end(&rx_ring->syncp);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001723 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1724 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1725
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001726 return failure ? budget : total_rx_packets;
Mitch Williamsa132af22015-01-24 09:58:35 +00001727}
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001728
Mitch Williamsa132af22015-01-24 09:58:35 +00001729/**
1730 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1731 * @rx_ring: rx ring to clean
1732 * @budget: how many cleans we're allowed
1733 *
 1734 * Returns the number of packets cleaned, or the full budget on allocation failure
1735 **/
1736static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1737{
1738 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1739 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1740 struct i40e_vsi *vsi = rx_ring->vsi;
1741 union i40e_rx_desc *rx_desc;
1742 u32 rx_error, rx_status;
1743 u16 rx_packet_len;
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001744 bool failure = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001745 u8 rx_ptype;
1746 u64 qword;
1747 u16 i;
1748
1749 do {
1750 struct i40e_rx_buffer *rx_bi;
1751 struct sk_buff *skb;
1752 u16 vlan_tag;
1753 /* return some buffers to hardware, one at a time is too slow */
1754 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001755 failure = failure ||
1756 i40e_alloc_rx_buffers_1buf(rx_ring,
1757 cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001758 cleaned_count = 0;
1759 }
1760
1761 i = rx_ring->next_to_clean;
1762 rx_desc = I40E_RX_DESC(rx_ring, i);
1763 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1764 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1765 I40E_RXD_QW1_STATUS_SHIFT;
1766
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001767 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001768 break;
1769
1770 /* This memory barrier is needed to keep us from reading
1771 * any other fields out of the rx_desc until we know the
1772 * DD bit is set.
1773 */
Alexander Duyck67317162015-04-08 18:49:43 -07001774 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001775
1776 if (i40e_rx_is_programming_status(qword)) {
1777 i40e_clean_programming_status(rx_ring, rx_desc);
1778 I40E_RX_INCREMENT(rx_ring, i);
1779 continue;
1780 }
1781 rx_bi = &rx_ring->rx_bi[i];
1782 skb = rx_bi->skb;
1783 prefetch(skb->data);
1784
1785 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1786 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1787
1788 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1789 I40E_RXD_QW1_ERROR_SHIFT;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001790 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
Mitch Williamsa132af22015-01-24 09:58:35 +00001791
1792 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1793 I40E_RXD_QW1_PTYPE_SHIFT;
1794 rx_bi->skb = NULL;
1795 cleaned_count++;
1796
1797 /* Get the header and possibly the whole packet
 1798	 * If this is an skb from a previous receive, dma will be 0.
1799 */
1800 skb_put(skb, rx_packet_len);
1801 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1802 DMA_FROM_DEVICE);
1803 rx_bi->dma = 0;
1804
1805 I40E_RX_INCREMENT(rx_ring, i);
1806
1807 if (unlikely(
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001808 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001809 rx_ring->rx_stats.non_eop_descs++;
1810 continue;
1811 }
1812
1813 /* ERR_MASK will only have valid bits if EOP set */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001814 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001815 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001816 continue;
1817 }
1818
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001819 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00001820 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1821 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1822 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1823 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1824 rx_ring->last_rx_timestamp = jiffies;
1825 }
1826
1827 /* probably a little skewed due to removing CRC */
1828 total_rx_bytes += skb->len;
1829 total_rx_packets++;
1830
1831 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1832
1833 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1834
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001835 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
Mitch Williamsa132af22015-01-24 09:58:35 +00001836 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1837 : 0;
1838#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001839 if (unlikely(
1840 i40e_rx_is_fcoe(rx_ptype) &&
1841 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001842 dev_kfree_skb_any(skb);
1843 continue;
1844 }
1845#endif
1846 i40e_receive_skb(rx_ring, skb, vlan_tag);
1847
Mitch Williamsa132af22015-01-24 09:58:35 +00001848 rx_desc->wb.qword1.status_error_len = 0;
1849 } while (likely(total_rx_packets < budget));
1850
1851 u64_stats_update_begin(&rx_ring->syncp);
1852 rx_ring->stats.packets += total_rx_packets;
1853 rx_ring->stats.bytes += total_rx_bytes;
1854 u64_stats_update_end(&rx_ring->syncp);
1855 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1856 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1857
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001858 return failure ? budget : total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001859}
1860
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001861static u32 i40e_buildreg_itr(const int type, const u16 itr)
1862{
1863 u32 val;
1864
1865 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08001866 /* Don't clear PBA because that can cause lost interrupts that
1867 * came in while we were cleaning/polling
1868 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001869 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1870 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1871
1872 return val;
1873}
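/* Example (illustrative, assuming the i40e_register.h layout: INTENA at
 * bit 0, ITR_INDX at bits 3-4, INTERVAL from bit 5): a 20 usec Rx interval
 * is itr = 10 in 2 usec units, so
 *
 *	i40e_buildreg_itr(I40E_RX_ITR, 10) == 0x141
 *
 * enables the interrupt and updates the Rx ITR in a single register write.
 */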
1874
1875/* a small macro to shorten up some long lines */
1876#define INTREG I40E_PFINT_DYN_CTLN
1877
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001878/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001879 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1880 * @vsi: the VSI we care about
1881 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1882 *
1883 **/
1884static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1885 struct i40e_q_vector *q_vector)
1886{
1887 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001888 bool rx = false, tx = false;
1889 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001890 int vector;
Kan Lianga75e8002016-02-19 09:24:04 -05001891 int idx = q_vector->v_idx;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001892
1893 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001894
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001895 /* avoid dynamic calculation if in countdown mode OR if
1896 * all dynamic is disabled
1897 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001898 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1899
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001900 if (q_vector->itr_countdown > 0 ||
Kan Lianga75e8002016-02-19 09:24:04 -05001901 (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
1902 !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001903 goto enable_int;
1904 }
1905
Kan Lianga75e8002016-02-19 09:24:04 -05001906 if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001907 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1908 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001909 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001910
Kan Lianga75e8002016-02-19 09:24:04 -05001911 if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001912 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1913 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001914 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001915
1916 if (rx || tx) {
1917 /* get the higher of the two ITR adjustments and
1918 * use the same value for both ITR registers
1919 * when in adaptive mode (Rx and/or Tx)
1920 */
1921 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1922
1923 q_vector->tx.itr = q_vector->rx.itr = itr;
1924 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1925 tx = true;
1926 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1927 rx = true;
1928 }
1929
1930 /* only need to enable the interrupt once, but need
1931 * to possibly update both ITR values
1932 */
1933 if (rx) {
1934 /* set the INTENA_MSK_MASK so that this first write
1935 * won't actually enable the interrupt, instead just
1936 * updating the ITR (it's bit 31 PF and VF)
1937 */
1938 rxval |= BIT(31);
1939 /* don't check _DOWN because interrupt isn't being enabled */
1940 wr32(hw, INTREG(vector - 1), rxval);
1941 }
1942
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001943enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001944 if (!test_bit(__I40E_DOWN, &vsi->state))
1945 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001946
1947 if (q_vector->itr_countdown)
1948 q_vector->itr_countdown--;
1949 else
1950 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001951}
1952
1953/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001954 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1955 * @napi: napi struct with our devices info in it
1956 * @budget: amount of work driver is allowed to do this pass, in packets
1957 *
1958 * This function will clean all queues associated with a q_vector.
1959 *
1960 * Returns the amount of work done
1961 **/
1962int i40e_napi_poll(struct napi_struct *napi, int budget)
1963{
1964 struct i40e_q_vector *q_vector =
1965 container_of(napi, struct i40e_q_vector, napi);
1966 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001967 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001968 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001969 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001970 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001971 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001972
1973 if (test_bit(__I40E_DOWN, &vsi->state)) {
1974 napi_complete(napi);
1975 return 0;
1976 }
1977
Kiran Patil9c6c1252015-11-06 15:26:02 -08001978 /* Clear hung_detected bit */
1979 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001980 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001981 * budget and be more aggressive about cleaning up the Tx descriptors.
1982 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001983 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08001984 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001985 clean_complete = false;
1986 continue;
1987 }
1988 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001989 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001990 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001991
Alexander Duyckc67cace2015-09-24 09:04:26 -07001992 /* Handle case where we are called by netpoll with a budget of 0 */
1993 if (budget <= 0)
1994 goto tx_only;
1995
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001996 /* We attempt to distribute budget to each Rx queue fairly, but don't
1997 * allow the budget to go below 1 because that would exit polling early.
1998 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001999 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
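	/* Example: with the default NAPI budget of 64 and two ring pairs on
	 * this vector, each Rx ring may clean up to 32 packets per poll; the
	 * max() keeps the per-ring budget at 1 even when a vector serves
	 * more than 64 ring pairs.
	 */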
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002000
Mitch Williamsa132af22015-01-24 09:58:35 +00002001 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002002 int cleaned;
2003
Mitch Williamsa132af22015-01-24 09:58:35 +00002004 if (ring_is_ps_enabled(ring))
2005 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
2006 else
2007 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002008
2009 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08002010 /* if we clean as many as budgeted, we must not be done */
2011 if (cleaned >= budget_per_ring)
2012 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00002013 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002014
2015 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002016 if (!clean_complete) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07002017tx_only:
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002018 if (arm_wb) {
2019 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
Anjali Singhai Jainecc6a232016-01-13 16:51:43 -08002020 i40e_enable_wb_on_itr(vsi, q_vector);
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002021 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002022 return budget;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002023 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002024
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002025 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2026 q_vector->arm_wb_state = false;
2027
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002028 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002029 napi_complete_done(napi, work_done);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002030 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
2031 i40e_update_enable_itr(vsi, q_vector);
2032 } else { /* Legacy mode */
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002033 i40e_irq_dynamic_enable_icr0(vsi->back, false);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002034 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002035 return 0;
2036}
2037
2038/**
2039 * i40e_atr - Add a Flow Director ATR filter
2040 * @tx_ring: ring to add programming descriptor to
2041 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002042 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002043 **/
2044static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002045 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002046{
2047 struct i40e_filter_program_desc *fdir_desc;
2048 struct i40e_pf *pf = tx_ring->vsi->back;
2049 union {
2050 unsigned char *network;
2051 struct iphdr *ipv4;
2052 struct ipv6hdr *ipv6;
2053 } hdr;
2054 struct tcphdr *th;
2055 unsigned int hlen;
2056 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002057 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002058 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002059
2060 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002061 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002062 return;
2063
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002064 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2065 return;
2066
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002067 /* if sampling is disabled do nothing */
2068 if (!tx_ring->atr_sample_rate)
2069 return;
2070
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002071 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002072 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002073 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002074
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002075 /* snag network header to get L4 type and address */
2076 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2077 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002078
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002079 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002080 * tx_enable_csum function if encap is enabled.
2081 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002082 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2083 /* access ihl as u8 to avoid unaligned access on ia64 */
2084 hlen = (hdr.network[0] & 0x0F) << 2;
2085 l4_proto = hdr.ipv4->protocol;
2086 } else {
2087 hlen = hdr.network - skb->data;
2088 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2089 hlen -= hdr.network - skb->data;
2090 }
2091
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002092 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002093 return;
2094
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002095 th = (struct tcphdr *)(hdr.network + hlen);
2096
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002097 /* Due to lack of space, no more new filters can be programmed */
2098 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2099 return;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002100 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2101 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002102 /* HW ATR eviction will take care of removing filters on FIN
2103 * and RST packets.
2104 */
2105 if (th->fin || th->rst)
2106 return;
2107 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002108
2109 tx_ring->atr_count++;
2110
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002111 /* sample on all syn/fin/rst packets or once every atr sample rate */
2112 if (!th->fin &&
2113 !th->syn &&
2114 !th->rst &&
2115 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002116 return;
2117
2118 tx_ring->atr_count = 0;
2119
2120 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002121 i = tx_ring->next_to_use;
2122 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2123
2124 i++;
2125 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002126
2127 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2128 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002129 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002130 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2131 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2132 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2133 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2134
2135 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2136
2137 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2138
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002139 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002140 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2141 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2142 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2143 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2144
2145 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2146 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2147
2148 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2149 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2150
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002151 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002152 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002153 dtype_cmd |=
2154 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2155 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2156 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2157 else
2158 dtype_cmd |=
2159 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2160 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2161 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002162
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002163 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2164 (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002165 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2166
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002167 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002168 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002169 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002170 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002171}
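/* Example (illustrative): with atr_sample_rate = 20, a long-lived TCP flow
 * gets a filter programmed on its SYN, is re-sampled once every 20 packets,
 * and is torn down by the REMOVE descriptor built above on FIN/RST (unless
 * HW eviction is active, in which case hardware removes it by itself).
 */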
2172
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002173/**
2174 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2175 * @skb: send buffer
2176 * @tx_ring: ring to send buffer on
2177 * @flags: the tx flags to be set
2178 *
2179 * Checks the skb and set up correspondingly several generic transmit flags
2180 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2181 *
2182 * Returns error code indicate the frame should be dropped upon error and the
2183 * otherwise returns 0 to indicate the flags has been set properly.
2184 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002185#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002186inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002187 struct i40e_ring *tx_ring,
2188 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002189#else
2190static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2191 struct i40e_ring *tx_ring,
2192 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002193#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002194{
2195 __be16 protocol = skb->protocol;
2196 u32 tx_flags = 0;
2197
Greg Rose31eaacc2015-03-31 00:45:03 -07002198 if (protocol == htons(ETH_P_8021Q) &&
2199 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2200 /* When HW VLAN acceleration is turned off by the user the
2201 * stack sets the protocol to 8021q so that the driver
2202 * can take any steps required to support the SW only
2203 * VLAN handling. In our case the driver doesn't need
2204 * to take any further steps so just set the protocol
2205 * to the encapsulated ethertype.
2206 */
2207 skb->protocol = vlan_get_protocol(skb);
2208 goto out;
2209 }
2210
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002211 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002212 if (skb_vlan_tag_present(skb)) {
2213 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002214 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2215 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002216 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002217 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002218
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002219 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2220 if (!vhdr)
2221 return -EINVAL;
2222
2223 protocol = vhdr->h_vlan_encapsulated_proto;
2224 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2225 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2226 }
2227
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002228 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2229 goto out;
2230
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002231 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002232 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2233 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002234 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2235 tx_flags |= (skb->priority & 0x7) <<
2236 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2237 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2238 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002239 int rc;
2240
2241 rc = skb_cow_head(skb, 0);
2242 if (rc < 0)
2243 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002244 vhdr = (struct vlan_ethhdr *)skb->data;
2245 vhdr->h_vlan_TCI = htons(tx_flags >>
2246 I40E_TX_FLAGS_VLAN_SHIFT);
2247 } else {
2248 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2249 }
2250 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002251
2252out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002253 *flags = tx_flags;
2254 return 0;
2255}
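/* Example (illustrative, assuming the tx_flags layout in i40e.h, where the
 * VLAN TCI sits above I40E_TX_FLAGS_VLAN_SHIFT = 16): an skb carrying a
 * hardware-accelerated tag 0x6005 (priority 3, VID 5) leaves here with
 *
 *	tx_flags == (0x6005 << 16) | I40E_TX_FLAGS_HW_VLAN
 *
 * and the DCB path above may rewrite the priority bits from skb->priority.
 */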
2256
2257/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002258 * i40e_tso - set up the tso context descriptor
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002259 * @skb: ptr to the skb we're sending
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002260 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002261 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002262 *
 2263 * Returns 0 if no TSO can happen, 1 if TSO is going, or a negative error code
2264 **/
Jesse Brandeburg84b07992016-04-01 03:56:05 -07002265static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002266{
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002267 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002268 union {
2269 struct iphdr *v4;
2270 struct ipv6hdr *v6;
2271 unsigned char *hdr;
2272 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002273 union {
2274 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002275 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002276 unsigned char *hdr;
2277 } l4;
2278 u32 paylen, l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002279 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002280
Shannon Nelsone9f65632016-01-04 10:33:04 -08002281 if (skb->ip_summed != CHECKSUM_PARTIAL)
2282 return 0;
2283
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002284 if (!skb_is_gso(skb))
2285 return 0;
2286
Francois Romieudd225bc2014-03-30 03:14:48 +00002287 err = skb_cow_head(skb, 0);
2288 if (err < 0)
2289 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002290
Alexander Duyckc7770192016-01-24 21:16:35 -08002291 ip.hdr = skb_network_header(skb);
2292 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002293
Alexander Duyckc7770192016-01-24 21:16:35 -08002294 /* initialize outer IP header fields */
2295 if (ip.v4->version == 4) {
2296 ip.v4->tot_len = 0;
2297 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002298 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002299 ip.v6->payload_len = 0;
2300 }
2301
Alexander Duyck577389a2016-04-02 00:06:56 -07002302 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2303 SKB_GSO_IPIP |
2304 SKB_GSO_SIT |
2305 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002306 SKB_GSO_UDP_TUNNEL_CSUM)) {
2307 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
2308 /* determine offset of outer transport header */
2309 l4_offset = l4.hdr - skb->data;
2310
2311 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002312 paylen = skb->len - l4_offset;
2313 csum_replace_by_diff(&l4.udp->check, htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002314 }
2315
Alexander Duyckc7770192016-01-24 21:16:35 -08002316 /* reset pointers to inner headers */
2317 ip.hdr = skb_inner_network_header(skb);
2318 l4.hdr = skb_inner_transport_header(skb);
2319
2320 /* initialize inner IP header fields */
2321 if (ip.v4->version == 4) {
2322 ip.v4->tot_len = 0;
2323 ip.v4->check = 0;
2324 } else {
2325 ip.v6->payload_len = 0;
2326 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002327 }
2328
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002329 /* determine offset of inner transport header */
2330 l4_offset = l4.hdr - skb->data;
2331
2332 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002333 paylen = skb->len - l4_offset;
2334 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002335
2336 /* compute length of segmentation header */
2337 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
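	/* Worked example (illustrative): a 65000-byte TSO skb with 14-byte
	 * Ethernet, 20-byte IPv4 and 20-byte TCP headers has l4_offset = 34
	 * and paylen = 64966; csum_replace_by_diff() backs the payload
	 * length out of the pseudo-header checksum so hardware can insert
	 * the per-segment length, and *hdr_len ends up 54.
	 */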
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002338
2339 /* find the field values */
2340 cd_cmd = I40E_TX_CTX_DESC_TSO;
2341 cd_tso_len = skb->len - *hdr_len;
2342 cd_mss = skb_shinfo(skb)->gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002343 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2344 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2345 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002346 return 1;
2347}
2348
2349/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002350 * i40e_tsyn - set up the tsyn context descriptor
2351 * @tx_ring: ptr to the ring to send
2352 * @skb: ptr to the skb we're sending
2353 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002354 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002355 *
2356 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2357 **/
2358static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2359 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2360{
2361 struct i40e_pf *pf;
2362
2363 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2364 return 0;
2365
2366 /* Tx timestamps cannot be sampled when doing TSO */
2367 if (tx_flags & I40E_TX_FLAGS_TSO)
2368 return 0;
2369
2370 /* only timestamp the outbound packet if the user has requested it and
2371 * we are not already transmitting a packet to be timestamped
2372 */
2373 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002374 if (!(pf->flags & I40E_FLAG_PTP))
2375 return 0;
2376
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002377 if (pf->ptp_tx &&
2378 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002379 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2380 pf->ptp_tx_skb = skb_get(skb);
2381 } else {
2382 return 0;
2383 }
2384
2385 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2386 I40E_TXD_CTX_QW1_CMD_SHIFT;
2387
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002388 return 1;
2389}
2390
2391/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002392 * i40e_tx_enable_csum - Enable Tx checksum offloads
2393 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002394 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002395 * @td_cmd: Tx descriptor command bits to set
2396 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06002397 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002398 * @cd_tunneling: ptr to context desc bits
2399 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08002400static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2401 u32 *td_cmd, u32 *td_offset,
2402 struct i40e_ring *tx_ring,
2403 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002404{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002405 union {
2406 struct iphdr *v4;
2407 struct ipv6hdr *v6;
2408 unsigned char *hdr;
2409 } ip;
2410 union {
2411 struct tcphdr *tcp;
2412 struct udphdr *udp;
2413 unsigned char *hdr;
2414 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002415 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002416 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002417 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002418 u8 l4_proto = 0;
2419
Alexander Duyck529f1f62016-01-24 21:17:10 -08002420 if (skb->ip_summed != CHECKSUM_PARTIAL)
2421 return 0;
2422
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002423 ip.hdr = skb_network_header(skb);
2424 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002425
Alexander Duyck475b4202016-01-24 21:17:01 -08002426 /* compute outer L2 header size */
2427 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
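	/* Example: an untagged Ethernet frame gives ip.hdr - skb->data == 14,
	 * so MACLEN is programmed as 7 two-byte words.
	 */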
2428
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002429 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002430 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08002431 /* define outer network header type */
2432 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002433 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2434 I40E_TX_CTX_EXT_IP_IPV4 :
2435 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2436
Alexander Duycka0064722016-01-24 21:16:48 -08002437 l4_proto = ip.v4->protocol;
2438 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002439 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002440
2441 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08002442 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002443 if (l4.hdr != exthdr)
2444 ipv6_skip_exthdr(skb, exthdr - skb->data,
2445 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08002446 }
2447
2448 /* define outer transport */
2449 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002450 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08002451 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002452 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002453 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002454 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08002455 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08002456 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002457 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07002458 case IPPROTO_IPIP:
2459 case IPPROTO_IPV6:
2460 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2461 l4.hdr = skb_inner_network_header(skb);
2462 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002463 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002464 if (*tx_flags & I40E_TX_FLAGS_TSO)
2465 return -1;
2466
2467 skb_checksum_help(skb);
2468 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002469 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002470
Alexander Duyck577389a2016-04-02 00:06:56 -07002471 /* compute outer L3 header size */
2472 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2473 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2474
2475 /* switch IP header pointer from outer to inner header */
2476 ip.hdr = skb_inner_network_header(skb);
2477
Alexander Duyck475b4202016-01-24 21:17:01 -08002478 /* compute tunnel header size */
2479 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2480 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2481
Alexander Duyck54532052016-01-24 21:17:29 -08002482 /* indicate if we need to offload outer UDP header */
2483 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2484 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2485 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2486
Alexander Duyck475b4202016-01-24 21:17:01 -08002487 /* record tunnel offload values */
2488 *cd_tunneling |= tunnel;
2489
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002490 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002491 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08002492 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002493
Alexander Duycka0064722016-01-24 21:16:48 -08002494 /* reset type as we transition from outer to inner headers */
2495 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2496 if (ip.v4->version == 4)
2497 *tx_flags |= I40E_TX_FLAGS_IPV4;
2498 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002499 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002500 }
2501
2502 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002503 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002504 l4_proto = ip.v4->protocol;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002505 /* the stack computes the IP header already, the only time we
2506 * need the hardware to recompute it is in the case of TSO.
2507 */
Alexander Duyck475b4202016-01-24 21:17:01 -08002508 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2509 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2510 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002511 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002512 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002513
2514 exthdr = ip.hdr + sizeof(*ip.v6);
2515 l4_proto = ip.v6->nexthdr;
2516 if (l4.hdr != exthdr)
2517 ipv6_skip_exthdr(skb, exthdr - skb->data,
2518 &l4_proto, &frag_off);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002519 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002520
Alexander Duyck475b4202016-01-24 21:17:01 -08002521 /* compute inner L3 header size */
2522 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002523
2524 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002525 switch (l4_proto) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002526 case IPPROTO_TCP:
2527 /* enable checksum offloads */
Alexander Duyck475b4202016-01-24 21:17:01 -08002528 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2529 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002530 break;
2531 case IPPROTO_SCTP:
2532 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002533 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2534 offset |= (sizeof(struct sctphdr) >> 2) <<
2535 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002536 break;
2537 case IPPROTO_UDP:
2538 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002539 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2540 offset |= (sizeof(struct udphdr) >> 2) <<
2541 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002542 break;
2543 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002544 if (*tx_flags & I40E_TX_FLAGS_TSO)
2545 return -1;
2546 skb_checksum_help(skb);
2547 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002548 }
Alexander Duyck475b4202016-01-24 21:17:01 -08002549
2550 *td_cmd |= cmd;
2551 *td_offset |= offset;
Alexander Duyck529f1f62016-01-24 21:17:10 -08002552
2553 return 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002554}
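
/* Worked illustration of the td_offset packing above (illustration only,
 * not driver code): for an untagged IPv4/TCP frame with no IP or TCP
 * options, i40e_tx_enable_csum() computes MACLEN = 14 bytes / 2 = 7,
 * IPLEN = 20 bytes / 4 = 5 and L4LEN = doff = 5, i.e.
 *
 *	offset = (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
 *		 (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
 *		 (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
 *
 * which is then ORed into *td_offset for the data descriptor.
 */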
2555
2556/**
2557 * i40e_create_tx_ctx - Build the Tx context descriptor
2558 * @tx_ring: ring to create the descriptor on
2559 * @cd_type_cmd_tso_mss: Quad Word 1
2560 * @cd_tunneling: Quad Word 0 - bits 0-31
2561 * @cd_l2tag2: Quad Word 0 - bits 32-63
2562 **/
2563static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2564 const u64 cd_type_cmd_tso_mss,
2565 const u32 cd_tunneling, const u32 cd_l2tag2)
2566{
2567 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002568 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002569
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002570 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2571 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002572 return;
2573
2574 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002575 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2576
2577 i++;
2578 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002579
2580 /* cpu_to_le32 and assign to struct fields */
2581 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2582 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002583 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002584 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2585}
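
/* For reference, the TSO path fills the QW1 value consumed above roughly
 * as follows (a sketch of what i40e_tso() in this file does; the
 * I40E_TX_CTX_DESC_TSO and I40E_TXD_CTX_QW1_* names are assumed from
 * i40e_type.h):
 *
 *	u64 cd = I40E_TX_DESC_DTYPE_CONTEXT;
 *
 *	cd |= (u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT;
 *	cd |= (u64)(skb->len - hdr_len) << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT;
 *	cd |= (u64)skb_shinfo(skb)->gso_size << I40E_TXD_CTX_QW1_MSS_SHIFT;
 */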
2586
2587/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002588 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2589 * @tx_ring: the ring to be checked
2590 * @size: the size buffer we want to assure is available
2591 *
2592 * Returns -EBUSY if a stop is needed, else 0
2593 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002594int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002595{
2596 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2597 /* Memory barrier before checking head and tail */
2598 smp_mb();
2599
2600 /* Check again in case another CPU has just made room available. */
2601 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2602 return -EBUSY;
2603
2604 /* A reprieve! - use start_queue because it doesn't call schedule */
2605 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2606 ++tx_ring->tx_stats.restart_queue;
2607 return 0;
2608}
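
/* The first-level check is an inline helper in i40e_txrx.h; a minimal
 * sketch of it, assuming the I40E_DESC_UNUSED() macro used above:
 */
#if 0	/* sketch only: the real helper lives in i40e_txrx.h */
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	/* fast path: plenty of free descriptors, no barrier needed */
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;

	/* slow path: stop the queue and re-check under smp_mb() */
	return __i40e_maybe_stop_tx(tx_ring, size);
}
#endif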
2609
2610/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002611 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00002612 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00002613 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002614 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2615 * and so we need to figure out the cases where we need to linearize the skb.
2616 *
2617 * For TSO we need to count the TSO header and segment payload separately.
2618 * As such we need to check cases where we have 7 fragments or more as we
2619 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2620 * the segment payload in the first descriptor, and another 7 for the
2621 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00002622 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08002623bool __i40e_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00002624{
Alexander Duyck2d374902016-02-17 11:02:50 -08002625 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002626 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00002627
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002628 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08002629 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002630 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08002631 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002632
Alexander Duyck2d374902016-02-17 11:02:50 -08002633 /* We need to walk through the list and validate that each group
2634 * of 6 fragments totals at least gso_size. However we don't need
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002635 * to perform such validation on the last 6 since the last 6 cannot
2636 * inherit any data from a descriptor after them.
Alexander Duyck2d374902016-02-17 11:02:50 -08002637 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002638 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08002639 frag = &skb_shinfo(skb)->frags[0];
2640
2641 /* Initialize sum to the negative value of gso_size minus 1. We
2642 * use this as the worst case scenario in which the frag ahead
2643 * of us only provides one byte which is why we are limited to 6
2644 * descriptors for a single transmit as the header and previous
2645 * fragment are already consuming 2 descriptors.
2646 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002647 sum = 1 - skb_shinfo(skb)->gso_size;
Alexander Duyck2d374902016-02-17 11:02:50 -08002648
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002649 /* Add size of frags 0 through 4 to create our initial sum */
2650 sum += skb_frag_size(frag++);
2651 sum += skb_frag_size(frag++);
2652 sum += skb_frag_size(frag++);
2653 sum += skb_frag_size(frag++);
2654 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002655
2656 /* Walk through fragments adding latest fragment, testing it, and
2657 * then removing stale fragments from the sum.
2658 */
2659 stale = &skb_shinfo(skb)->frags[0];
2660 for (;;) {
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002661 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002662
2663 /* if sum is negative we failed to make sufficient progress */
2664 if (sum < 0)
2665 return true;
2666
2667 /* use pre-decrement to avoid processing last fragment */
2668 if (!--nr_frags)
2669 break;
2670
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002671 sum -= skb_frag_size(stale++);
Anjali Singhai71da6192015-02-21 06:42:35 +00002672 }
2673
Alexander Duyck2d374902016-02-17 11:02:50 -08002674 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002675}
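
/* Worked example with hypothetical sizes: gso_size = 4096 and eight
 * 512 byte fragments.  sum starts at 1 - 4096 = -4095; frags 0-4 add
 * 2560 for a running total of -1535; the first loop pass adds frag 5
 * for -1023, still negative, so no window of 6 fragments covers a full
 * segment and the skb must be linearized (return true).  With
 * gso_size = 2048 the same layout passes, since any 6 consecutive
 * frags carry 3072 bytes.
 */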
2676
2677/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002678 * i40e_tx_map - Build the Tx descriptor
2679 * @tx_ring: ring to send buffer on
2680 * @skb: send buffer
2681 * @first: first buffer info buffer to use
2682 * @tx_flags: collected send information
2683 * @hdr_len: size of the packet header
2684 * @td_cmd: the command field in the descriptor
2685 * @td_offset: offset for checksum or crc
2686 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002687#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002688inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002689 struct i40e_tx_buffer *first, u32 tx_flags,
2690 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002691#else
2692static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2693 struct i40e_tx_buffer *first, u32 tx_flags,
2694 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002695#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002696{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002697 unsigned int data_len = skb->data_len;
2698 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002699 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002700 struct i40e_tx_buffer *tx_bi;
2701 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002702 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002703 u32 td_tag = 0;
2704 dma_addr_t dma;
2705 u16 gso_segs;
Anjali Singhai58044742015-09-25 18:26:13 -07002706 u16 desc_count = 0;
2707 bool tail_bump = true;
2708 bool do_rs = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002709
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002710 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2711 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2712 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2713 I40E_TX_FLAGS_VLAN_SHIFT;
2714 }
2715
Alexander Duycka5e9c572013-09-28 06:00:27 +00002716 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2717 gso_segs = skb_shinfo(skb)->gso_segs;
2718 else
2719 gso_segs = 1;
2720
2721 /* multiply data chunks by size of headers */
2722 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2723 first->gso_segs = gso_segs;
2724 first->skb = skb;
2725 first->tx_flags = tx_flags;
2726
2727 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2728
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002729 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002730 tx_bi = first;
2731
2732 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002733 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2734
Alexander Duycka5e9c572013-09-28 06:00:27 +00002735 if (dma_mapping_error(tx_ring->dev, dma))
2736 goto dma_error;
2737
2738 /* record length, and DMA address */
2739 dma_unmap_len_set(tx_bi, len, size);
2740 dma_unmap_addr_set(tx_bi, dma, dma);
2741
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002742 /* align size to end of page */
2743 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002744 tx_desc->buffer_addr = cpu_to_le64(dma);
2745
2746 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002747 tx_desc->cmd_type_offset_bsz =
2748 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002749 max_data, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002750
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002751 tx_desc++;
2752 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002753 desc_count++;
2754
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002755 if (i == tx_ring->count) {
2756 tx_desc = I40E_TX_DESC(tx_ring, 0);
2757 i = 0;
2758 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002759
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002760 dma += max_data;
2761 size -= max_data;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002762
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002763 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002764 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002765 }
2766
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002767 if (likely(!data_len))
2768 break;
2769
Alexander Duycka5e9c572013-09-28 06:00:27 +00002770 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2771 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002772
2773 tx_desc++;
2774 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002775 desc_count++;
2776
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002777 if (i == tx_ring->count) {
2778 tx_desc = I40E_TX_DESC(tx_ring, 0);
2779 i = 0;
2780 }
2781
Alexander Duycka5e9c572013-09-28 06:00:27 +00002782 size = skb_frag_size(frag);
2783 data_len -= size;
2784
2785 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2786 DMA_TO_DEVICE);
2787
2788 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002789 }
2790
Alexander Duycka5e9c572013-09-28 06:00:27 +00002791 /* set next_to_watch value indicating a packet is present */
2792 first->next_to_watch = tx_desc;
2793
2794 i++;
2795 if (i == tx_ring->count)
2796 i = 0;
2797
2798 tx_ring->next_to_use = i;
2799
Anjali Singhai58044742015-09-25 18:26:13 -07002800 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2801 tx_ring->queue_index),
2802 first->bytecount);
Eric Dumazet4567dc12014-10-07 13:30:23 -07002803 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07002804
2805 /* Algorithm to optimize tail and RS bit setting:
2806 * if xmit_more is supported
2807 * if xmit_more is true
2808 * do not update tail and do not mark RS bit.
2809 * if xmit_more is false and last xmit_more was false
2810 * if every packet spanned fewer than 4 desc
2811 * then set RS bit on 4th packet and update tail
2812 * on every packet
2813 * else
2814 * update tail and set RS bit on every packet.
2815 * if xmit_more is false and last xmit_more was true
2816 * update tail and set RS bit.
2817 *
2818 * Optimization: wmb to be issued only in case of tail update.
2819 * Also optimize the Descriptor WB path for RS bit with the same
2820 * algorithm.
2821 *
2822 * Note: If there are fewer than 4 packets
2823 * pending and interrupts were disabled, the service task will
2824 * trigger a force WB.
2825 */
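	/* Example of the policy above for a hypothetical 3-skb xmit_more
	 * burst: skbs 1 and 2 take the first branch (no tail write, no RS
	 * bit, LAST_XMIT_MORE_SET gets set); skb 3 has xmit_more clear but
	 * sees the flag set, so it falls through to the final branch and
	 * performs a single tail write with the RS bit set for the burst.
	 */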
2826 if (skb->xmit_more &&
2827 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2828 tx_ring->queue_index))) {
2829 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2830 tail_bump = false;
2831 } else if (!skb->xmit_more &&
2832 !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2833 tx_ring->queue_index)) &&
2834 (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2835 (tx_ring->packet_stride < WB_STRIDE) &&
2836 (desc_count < WB_STRIDE)) {
2837 tx_ring->packet_stride++;
2838 } else {
2839 tx_ring->packet_stride = 0;
2840 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2841 do_rs = true;
2842 }
2843 if (do_rs)
2844 tx_ring->packet_stride = 0;
2845
2846 tx_desc->cmd_type_offset_bsz =
2847 build_ctob(td_cmd, td_offset, size, td_tag) |
2848 cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2849 I40E_TX_DESC_CMD_EOP) <<
2850 I40E_TXD_QW1_CMD_SHIFT);
2851
Alexander Duycka5e9c572013-09-28 06:00:27 +00002852 /* notify HW of packet */
Anjali Singhai58044742015-09-25 18:26:13 -07002853 if (!tail_bump)
Jesse Brandeburg489ce7a2015-04-27 14:57:08 -04002854 prefetchw(tx_desc + 1);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002855
Anjali Singhai58044742015-09-25 18:26:13 -07002856 if (tail_bump) {
2857 /* Force memory writes to complete before letting h/w
2858 * know there are new descriptors to fetch. (Only
2859 * applicable for weak-ordered memory model archs,
2860 * such as IA-64).
2861 */
2862 wmb();
2863 writel(i, tx_ring->tail);
2864 }
2865
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002866 return;
2867
2868dma_error:
Alexander Duycka5e9c572013-09-28 06:00:27 +00002869 dev_info(tx_ring->dev, "TX DMA map failed\n");
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002870
2871 /* clear dma mappings for failed tx_bi map */
2872 for (;;) {
2873 tx_bi = &tx_ring->tx_bi[i];
Alexander Duycka5e9c572013-09-28 06:00:27 +00002874 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002875 if (tx_bi == first)
2876 break;
2877 if (i == 0)
2878 i = tx_ring->count;
2879 i--;
2880 }
2881
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002882 tx_ring->next_to_use = i;
2883}
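
/* Note on the chunking loop in i40e_tx_map() above: max_data starts at
 * I40E_MAX_DATA_PER_TXD_ALIGNED and is padded by (-dma & 4095) so that
 * the first chunk of an oversized buffer ends on a max-read-request
 * (4K) boundary, after which every chunk stays 4K aligned.  Assuming
 * the values in i40e_txrx.h (12288 aligned / 16383 max): a buffer whose
 * mapping starts 256 bytes past a 4K boundary gets a first chunk of
 * 12288 + 3840 = 16128 bytes, still under the 16383 byte descriptor
 * limit, and leaves dma 4K aligned for the remaining chunks.
 */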
2884
2885/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002886 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2887 * @skb: send buffer
2888 * @tx_ring: ring to send buffer on
2889 *
2890 * Returns NETDEV_TX_OK if sent, else an error code
2891 **/
2892static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2893 struct i40e_ring *tx_ring)
2894{
2895 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2896 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2897 struct i40e_tx_buffer *first;
2898 u32 td_offset = 0;
2899 u32 tx_flags = 0;
2900 __be16 protocol;
2901 u32 td_cmd = 0;
2902 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002903 int tso, count;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002904 int tsyn;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002905
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04002906 /* prefetch the data, we'll need it later */
2907 prefetch(skb->data);
2908
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002909 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08002910 if (i40e_chk_linearize(skb, count)) {
2911 if (__skb_linearize(skb))
2912 goto out_drop;
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002913 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08002914 tx_ring->tx_stats.tx_linearize++;
2915 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002916
2917 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2918 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2919 * + 4 desc gap to avoid the cache line where head is,
2920 * + 1 desc for context descriptor,
2921 * otherwise try next time
2922 */
2923 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2924 tx_ring->tx_stats.tx_busy++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002925 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002926 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002927
2928 /* prepare the xmit flags */
2929 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2930 goto out_drop;
2931
2932 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04002933 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002934
2935 /* record the location of the first descriptor for this packet */
2936 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2937
2938 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002939 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002940 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002941 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002942 tx_flags |= I40E_TX_FLAGS_IPV6;
2943
Jesse Brandeburg84b07992016-04-01 03:56:05 -07002944 tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002945
2946 if (tso < 0)
2947 goto out_drop;
2948 else if (tso)
2949 tx_flags |= I40E_TX_FLAGS_TSO;
2950
Alexander Duyck3bc67972016-02-17 11:02:56 -08002951 /* Always offload the checksum, since it's in the data descriptor */
2952 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2953 tx_ring, &cd_tunneling);
2954 if (tso < 0)
2955 goto out_drop;
2956
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002957 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2958
2959 if (tsyn)
2960 tx_flags |= I40E_TX_FLAGS_TSYN;
2961
Jakub Kicinski259afec2014-03-15 14:55:37 +00002962 skb_tx_timestamp(skb);
2963
Alexander Duyckb1941302013-09-28 06:00:32 +00002964 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002965 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2966
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002967 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2968 cd_tunneling, cd_l2tag2);
2969
2970 /* Add Flow Director ATR if it's enabled.
2971 *
2972 * NOTE: this must always be directly before the data descriptor.
2973 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002974 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002975
2976 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2977 td_cmd, td_offset);
2978
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002979 return NETDEV_TX_OK;
2980
2981out_drop:
2982 dev_kfree_skb_any(skb);
2983 return NETDEV_TX_OK;
2984}
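
/* Rough sketch of the descriptor-count helpers used at the top of
 * i40e_xmit_frame_ring(); the real inlines live in i40e_txrx.h, and the
 * upstream division may be implemented with a cheaper multiply/shift
 * than the DIV_ROUND_UP() shown here:
 */
#if 0	/* sketch only: the real helpers live in i40e_txrx.h */
static inline int i40e_txd_use_count(unsigned int size)
{
	/* one descriptor per I40E_MAX_DATA_PER_TXD_ALIGNED bytes */
	return DIV_ROUND_UP(size, I40E_MAX_DATA_PER_TXD_ALIGNED);
}

static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	/* count the head area, then each fragment in turn */
	for (;;) {
		count += i40e_txd_use_count(size);
		if (!nr_frags--)
			break;
		size = skb_frag_size(frag++);
	}
	return count;
}
#endif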
2985
2986/**
2987 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2988 * @skb: send buffer
2989 * @netdev: network interface device structure
2990 *
2991 * Returns NETDEV_TX_OK if sent, else an error code
2992 **/
2993netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2994{
2995 struct i40e_netdev_priv *np = netdev_priv(netdev);
2996 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e15b2013-09-28 06:00:58 +00002997 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002998
2999 /* hardware can't handle frames shorter than I40E_MIN_TX_LEN, and
3000 * hardware padding only works beyond this point, so pad in software
3001 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08003002 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3003 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003004
3005 return i40e_xmit_frame_ring(skb, tx_ring);
3006}