/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
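
/* Illustrative note (not from the original source): build_ctob() packs the
 * second 64-bit quadword of a Tx data descriptor -- DTYPE in the low bits,
 * the command flags at I40E_TXD_QW1_CMD_SHIFT, header offsets, buffer size
 * and L2 tag at their respective shifts.  For example, the FDir path below
 * calls build_ctob(I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY, 0,
 * I40E_FDIR_MAX_RAW_PACKET_SIZE, 0): command flags and raw-packet size set,
 * no offsets and no VLAN tag.
 */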

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
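
/* Note added for clarity (not from the original source): programming one
 * Flow Director rule consumes two descriptors on the FDIR Tx ring -- the
 * filter programming descriptor filled in above, followed by a data
 * descriptor carrying the dummy packet whose headers supply the match
 * fields.  That is why the availability check above insists on
 * I40E_DESC_UNUSED(tx_ring) > 1 before proceeding.
 */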

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	return err ? -EOPNOTSUPP : 0;
}
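
/* Note added for clarity (not from the original source): the dummy packet
 * lengths used by these helpers are plain header arithmetic.  With a 14-byte
 * Ethernet header (IP_HEADER_OFFSET) and a 20-byte IPv4 header, an 8-byte
 * UDP header gives 14 + 20 + 8 = 42 = I40E_UDPIP_DUMMY_PACKET_LEN; the TCP
 * variant below uses 14 + 20 + 20 = 54, and the plain-IPv4 variant
 * 14 + 20 = 34.
 */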

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}
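
/* Note added for clarity (not from the original source): fd_tcp_rule counts
 * the active sideband TCP/IPv4 rules.  ATR (Application Targeted Routing)
 * is suppressed while the count is non-zero, presumably because ATR and
 * explicit sideband rules populate the same Flow Director table, and is
 * re-enabled only when the last such rule is deleted.
 */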

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}
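
/* Note added for clarity (not from the original source): unlike the UDP and
 * TCP helpers, the plain-IPv4 case programs one filter per packet classifier
 * type, looping from I40E_FILTER_PCTYPE_NONF_IPV4_OTHER through
 * I40E_FILTER_PCTYPE_FRAG_IPV4 so that both non-fragmented and fragmented
 * IPv4 traffic match the rule.
 */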

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
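
/* Usage sketch added for clarity (not from the original source): this entry
 * point is typically reached from the ethtool ntuple path, roughly:
 *
 *	ethtool -N eth0 flow-type tcp4 src-ip ... dst-port ... action N
 *	  -> ethtool_ops .set_rxnfc -> i40e_add_del_fdir(vsi, input, true)
 *
 * The exact callback names in the ethtool glue are recalled from the
 * surrounding driver code and may differ between kernel versions.
 */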

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		} else {
			dev_info(&pdev->dev,
				 "FD filter programming failed due to incorrect filter parameters\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

#define WB_STRIDE 0x3
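/* Note added for clarity (not from the original source): WB_STRIDE is used
 * below as (j / (WB_STRIDE + 1)) == 0, i.e. "fewer than 4 descriptors still
 * pending", the threshold at which a write-back is forced when running with
 * write-back-on-ITR enabled.
 */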

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;

	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		u32 val;

		if (q_vector->arm_wb_state)
			return;

		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		     val);
		q_vector->arm_wb_state = true;
	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}
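
/* Worked example added for clarity (not from the original source): the
 * header buffers come from one coherent allocation sliced into equal chunks.
 * If rx_hdr_len were 96, ALIGN(96, 256) = 256, so descriptor i gets the
 * 256-byte slice at buffer + i * 256 with matching DMA address
 * dma + i * 256; the whole block is released by a single dma_free_coherent()
 * in i40e_clean_rx_ring().
 */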

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
		}

		if (!bi->page_dma) {
			/* use a half page if we're re-using */
			bi->page_offset ^= PAGE_SIZE / 2;
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    bi->page_offset,
						    PAGE_SIZE / 2,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				bi->page_dma = 0;
				goto no_buffers;
			}
		}

		dma_sync_single_range_for_device(rx_ring->dev,
						 bi->dma,
						 0,
						 rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}
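
/* Note added for clarity (not from the original source): in packet-split
 * mode each page is consumed one half at a time.  The XOR
 * bi->page_offset ^= PAGE_SIZE / 2 flips between offset 0 and offset 2048
 * (with 4 KiB pages), so a page whose first half is still held by the stack
 * can immediately donate its second half to a new descriptor.
 */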
1233
1234/**
1235 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1236 * @rx_ring: ring to place buffers on
1237 * @cleaned_count: number of buffers to replace
1238 **/
1239void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001240{
1241 u16 i = rx_ring->next_to_use;
1242 union i40e_rx_desc *rx_desc;
1243 struct i40e_rx_buffer *bi;
1244 struct sk_buff *skb;
1245
1246 /* do nothing if no valid netdev defined */
1247 if (!rx_ring->netdev || !cleaned_count)
1248 return;
1249
1250 while (cleaned_count--) {
1251 rx_desc = I40E_RX_DESC(rx_ring, i);
1252 bi = &rx_ring->rx_bi[i];
1253 skb = bi->skb;
1254
1255 if (!skb) {
1256 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1257 rx_ring->rx_buf_len);
1258 if (!skb) {
Mitch Williams420136c2013-12-18 13:45:59 +00001259 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001260 goto no_buffers;
1261 }
1262 /* initialize queue mapping */
1263 skb_record_rx_queue(skb, rx_ring->queue_index);
1264 bi->skb = skb;
1265 }
1266
1267 if (!bi->dma) {
1268 bi->dma = dma_map_single(rx_ring->dev,
1269 skb->data,
1270 rx_ring->rx_buf_len,
1271 DMA_FROM_DEVICE);
1272 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
Mitch Williams420136c2013-12-18 13:45:59 +00001273 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001274 bi->dma = 0;
1275 goto no_buffers;
1276 }
1277 }
1278
Mitch Williamsa132af22015-01-24 09:58:35 +00001279 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1280 rx_desc->read.hdr_addr = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001281 i++;
1282 if (i == rx_ring->count)
1283 i = 0;
1284 }
1285
1286no_buffers:
1287 if (rx_ring->next_to_use != i)
1288 i40e_release_rx_desc(rx_ring, i);
1289}
1290
1291/**
1292 * i40e_receive_skb - Send a completed packet up the stack
1293 * @rx_ring: rx ring in play
1294 * @skb: packet to send up
1295 * @vlan_tag: vlan tag for packet
1296 **/
1297static void i40e_receive_skb(struct i40e_ring *rx_ring,
1298 struct sk_buff *skb, u16 vlan_tag)
1299{
1300 struct i40e_q_vector *q_vector = rx_ring->q_vector;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001301
1302 if (vlan_tag & VLAN_VID_MASK)
1303 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1304
Alexander Duyck8b650352015-09-24 09:04:32 -07001305 napi_gro_receive(&q_vector->napi, skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001306}
1307
1308/**
1309 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1310 * @vsi: the VSI we care about
1311 * @skb: skb currently being received and modified
1312 * @rx_status: status value of last descriptor in packet
1313 * @rx_error: error value of last descriptor in packet
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001314 * @rx_ptype: ptype value of last descriptor in packet
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001315 **/
1316static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1317 struct sk_buff *skb,
1318 u32 rx_status,
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001319 u32 rx_error,
1320 u16 rx_ptype)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001321{
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001322 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1323 bool ipv4 = false, ipv6 = false;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001324 bool ipv4_tunnel, ipv6_tunnel;
1325 __wsum rx_udp_csum;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001326 struct iphdr *iph;
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001327 __sum16 csum;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001328
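	/* detect tunneled frames by ptype range; the GRENAT ptypes cover
	 * GRE and UDP-based (e.g. VXLAN) encapsulations
	 */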
	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
	    (ipv4_tunnel)) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);
			iph = ip_hdr(skb);
			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)
				goto checksum_fail;

		} /* else its GRE and so no outer UDP header */
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}

/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
 * @budget: total limit on the number of packets to clean
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_mem_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;

	if (budget <= 0)
		return 0;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_hdr_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_bi->dma,
						      0,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		prefetch(rx_bi->page);
		rx_bi->skb = NULL;
		cleaned_count++;
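		/* if the hardware split the headers into the header buffer
		 * (SPH) or overflowed it (HBO), copy the header bytes out of
		 * the pre-allocated header buffer into the skb's linear area
		 */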
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;

			len = (rx_packet_len > skb_headlen(skb) ?
				skb_headlen(skb) : rx_packet_len);
			memcpy(__skb_put(skb, len),
			       rx_bi->page + rx_bi->page_offset,
			       len);
			rx_bi->page_offset += len;
			rx_packet_len -= len;
		}

		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

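			/* keep the page for reuse only if we hold the sole
			 * reference and it is local to this NUMA node; take
			 * an extra reference since the skb frag now owns one
			 */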
			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring: rx ring to clean
 * @budget: total limit on the number of packets to clean
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;
		cleaned_count++;

		/* the entire packet is in this buffer; expose the data and
		 * unmap the DMA so the CPU owns it from here on
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

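/**
 * i40e_buildreg_itr - build a value for PFINT_DYN_CTLN
 * @type: ITR index to use (Rx, Tx, or I40E_ITR_NONE)
 * @itr: interval value to program
 *
 * Returns a register value that enables the interrupt, clears the PBA,
 * and selects which ITR register (if any) the interval updates.
 **/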
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN

/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;

	vector = (q_vector->v_idx + vsi->base_vector);

	/* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}

	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);

	if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;

}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);

		work_done += cleaned;
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb)
			i40e_force_wb(vsi, q_vector);
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		struct i40e_hw *hw = &vsi->back->hw;
		/* We re-enable the queue 0 cause, but
		 * don't worry about dynamic_enable
		 * because we left it on for the other
		 * possible interrupts during napi
		 */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		wr32(hw, I40E_QINT_RQCTL(0), qval);
		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back);
	}
	return 0;
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);
		else
			return;
	} else {
		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
		return;
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

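	/* build the filter programming descriptor: queue index to steer to,
	 * PCTYPE chosen from the wire protocol, and the destination VSI
	 */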
	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}

/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN and DCB.
 *
 * Returns an error code if the frame should be dropped, otherwise 0 to
 * indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}

/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring: ptr to the ring to send
 * @skb: ptr to the skb we're sending
 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: ptr to u64 object
 * @cd_tunneling: ptr to context descriptor bits
 *
 * Returns 0 if no TSO is needed, 1 if TSO was set up, or a negative
 * error code.
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
		    u32 *cd_tunneling)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

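	/* zero the length fields and seed the TCP pseudo-header checksum
	 * (computed with length 0) so the hardware can finalize the
	 * checksum for each segment it carves out
	 */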
	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}

/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring: ptr to the ring to send
 * @skb: ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: ptr to u64 object
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph;
	struct iphdr *oiph;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				   l4_tunnel                        |
				   ((skb_inner_network_offset(skb) -
					skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
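		/* if the hardware can checksum the outer UDP header, seed
		 * its pseudo-header checksum and request the offload via
		 * the L4T_CS bit in the context descriptor
		 */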
		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
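		/* walk the frags, resetting the per-segment descriptor
		 * count (j) each time a full gso_size worth of data has
		 * been consumed; if any segment would need more than
		 * I40E_MAX_BUFFER_TXD descriptors we must linearize
		 */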
2526 do {
2527 size += skb_frag_size(frag);
2528 frag++; j++;
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002529 if ((size >= skb_shinfo(skb)->gso_size) &&
2530 (j < I40E_MAX_BUFFER_TXD)) {
2531 size = (size % skb_shinfo(skb)->gso_size);
2532 j = (size) ? 1 : 0;
2533 }
Anjali Singhai71da6192015-02-21 06:42:35 +00002534 if (j == I40E_MAX_BUFFER_TXD) {
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002535 linearize = true;
2536 break;
Anjali Singhai71da6192015-02-21 06:42:35 +00002537 }
2538 num_frags--;
2539 } while (num_frags);
2540 } else {
2541 if (num_frags >= I40E_MAX_BUFFER_TXD)
2542 linearize = true;
2543 }
2544
2545linearize_chk_done:
2546 return linearize;
2547}
2548
2549/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002550 * i40e_tx_map - Build the Tx descriptor
2551 * @tx_ring: ring to send buffer on
2552 * @skb: send buffer
2553 * @first: first buffer info buffer to use
2554 * @tx_flags: collected send information
2555 * @hdr_len: size of the packet header
2556 * @td_cmd: the command field in the descriptor
2557 * @td_offset: offset for checksum or crc
2558 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002559#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002560inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002561 struct i40e_tx_buffer *first, u32 tx_flags,
2562 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002563#else
2564static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2565 struct i40e_tx_buffer *first, u32 tx_flags,
2566 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002567#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* tally what goes on the wire: the payload bytes once, plus one
	 * copy of the header for every segment the hardware will emit
	 */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
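	/* hypothetical example: a TSO skb of 7040 bytes with a 64-byte
	 * header split into 5 segments counts
	 * 7040 - 64 + (5 * 64) = 7296 bytes on the wire
	 */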
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

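	/* map the linear part of the skb up front; page frags are mapped
	 * one at a time at the bottom of the loop below
	 */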
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

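		/* a buffer larger than what one descriptor can carry is
		 * split across several descriptors of
		 * I40E_MAX_DATA_PER_TXD bytes each
		 */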
		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

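	/* report the queued bytes to BQL so the stack can pace the queue */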
	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: if there are fewer than 4 packets pending and interrupts
	 * were disabled, the service task will trigger a force writeback.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

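	/* write the final descriptor for this packet: EOP is always set,
	 * RS only when we want the hardware to write back its progress
	 */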
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	if (!tail_bump) {
		/* tail update is deferred (xmit_more): warm up the cache
		 * line of the next descriptor we will fill instead of
		 * notifying the hardware now
		 */
		prefetchw(tx_desc + 1);
	} else {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear DMA mappings for this packet's buffers, walking back
	 * from the failing slot to the first descriptor we filled
	 */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns the number of data descriptors needed for this skb, or 0 when
 * there are not enough descriptors available on this ring (we always need
 * at least one).
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 * + 4 desc gap to avoid the cache line where head is,
	 * + 1 desc for context descriptor,
	 * otherwise try next time
	 */
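	/* TXD_USE_COUNT() rounds a buffer up to the number of descriptors
	 * it occupies at I40E_MAX_DATA_PER_TXD bytes per descriptor
	 */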
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	if (!i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len,
		       &cd_type_cmd_tso_mss, &cd_tunneling);

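	/* i40e_tso() is tri-state: negative on error (drop the skb),
	 * 0 when no TSO is needed, positive when the TSO context fields
	 * and hdr_len have been filled in
	 */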
	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
		tx_ring->tx_stats.tx_linearize++;
	}
	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
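	/* skb->queue_mapping was already chosen by the stack (queue
	 * selection / XPS), so the ring lookup above is a plain index
	 * into this VSI's Tx rings
	 */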

	/* hardware can't handle really short frames; hardware padding
	 * only works beyond this point, so pad up to I40E_MIN_TX_LEN
	 * in software first
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}