/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

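/* build_ctob() assembles the 64-bit cmd_type_offset_bsz quadword of a data
 * Tx descriptor: the DATA descriptor type ORed with the command flags,
 * header offsets, buffer size and L2 tag, each shifted into place with the
 * I40E_TXD_QW1_* definitions used below.
 */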
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

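/* The add/del helpers below program filters by sending a pre-built dummy
 * frame through i40e_program_fdir_filter().  Each template is a bare
 * Ethernet + IPv4 header (14-byte Ethernet header, hence IP_HEADER_OFFSET)
 * whose addresses and ports are patched in from the user-supplied filter
 * before the raw packet is handed to the hardware.
 */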
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Always returns -EOPNOTSUPP
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		} else {
			dev_info(&pdev->dev,
				 "FD filter programming failed due to incorrect filter parameters\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);

		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

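	/* account for the ring wrapping around: e.g. with a 512 descriptor
	 * ring, head = 500 and tail = 10 means 10 + 512 - 500 = 22
	 * descriptors are still pending.
	 */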
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * i40e_check_tx_hang - Is there a hang in the Tx queue
 * @tx_ring: the ring of descriptors
 **/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
	u32 tx_done = tx_ring->stats.packets;
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = i40e_get_tx_pending(tx_ring);
	struct i40e_pf *pf = tx_ring->vsi->back;
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * PFC clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else if (tx_done_old == tx_done &&
		   (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
		if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
			dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
				 tx_pending, tx_ring->queue_index);
		pf->tx_sluggish_count++;
	} else {
		/* update completed stats and disarm the hang check */
		tx_ring->tx_stats.tx_done_old = tx_done;
		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

#define WB_STRIDE 0x3
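/* WB_STRIDE masks the low two bits of next_to_clean: when cleaning stops off
 * a four-descriptor boundary, i40e_clean_tx_irq() below sets arm_wb so the
 * straggling descriptors are forced out instead of waiting for the next
 * interrupt.
 */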

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

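	/* tx_head tracks the completion point reported by the hardware: the
	 * head index is written back into the extra u32 reserved past the
	 * descriptor ring (see i40e_setup_tx_descriptors()), and cleaning
	 * below stops once tx_desc catches up with it.
	 */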
	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	/* check to see if there are any non-cache aligned descriptors
	 * waiting to be written back, and kick the hardware to force
	 * them to be written back in case of napi polling
	 */
	if (budget &&
	    !((i & WB_STRIDE) == WB_STRIDE) &&
	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
		tx_ring->arm_wb = true;
	else
		tx_ring->arm_wb = false;

	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
			 "  VSI                  <%d>\n"
			 "  Tx Queue             <%d>\n"
			 "  next_to_use          <%x>\n"
			 "  next_to_clean        <%x>\n",
			 tx_ring->vsi->seid,
			 tx_ring->queue_index,
			 tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		dev_info(tx_ring->dev,
			 "tx hang detected on queue %d, reset requested\n",
			 tx_ring->queue_index);

		/* do not fire the reset immediately, wait for the stack to
		 * decide we are truly stuck, also prevents every queue from
		 * simultaneously requesting a reset
		 */

		/* the adapter is about to reset, no point in enabling polling */
		budget = 1;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;

	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		u32 val;

		if (q_vector->arm_wb_state)
			return;

		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		     val);
		q_vector->arm_wb_state = true;
	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	u32 new_itr = rc->itr;
	int bytes_per_int;

	if (rc->total_packets == 0 || !rc->itr)
		return;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (100000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (8000 ints/s)
	 */
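	/* Two stage update: the first switch moves the ring between latency
	 * ranges based on bytes_per_int, the second maps the resulting range
	 * to an ITR value.  For example, a ring in I40E_LOWEST_LATENCY whose
	 * bytes_per_int rises above 10 moves to I40E_LOW_LATENCY and ends up
	 * with I40E_ITR_20K (roughly 20000 ints/s).
	 */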
	bytes_per_int = rc->total_bytes / rc->itr;
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}
	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_100K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	if (new_itr != rc->itr)
		rc->itr = new_itr;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
		}

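		/* packet split: each descriptor carries half of a page for
		 * packet data (mapped below) plus the small header buffer set
		 * up by i40e_alloc_rx_headers().  Both addresses are rewritten
		 * at the end of the loop because the hardware write-back
		 * erases them.
		 */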
1229 if (!bi->page_dma) {
1230 /* use a half page if we're re-using */
1231 bi->page_offset ^= PAGE_SIZE / 2;
1232 bi->page_dma = dma_map_page(rx_ring->dev,
1233 bi->page,
1234 bi->page_offset,
1235 PAGE_SIZE / 2,
1236 DMA_FROM_DEVICE);
1237 if (dma_mapping_error(rx_ring->dev,
1238 bi->page_dma)) {
1239 rx_ring->rx_stats.alloc_page_failed++;
1240 bi->page_dma = 0;
1241 goto no_buffers;
1242 }
1243 }
1244
1245 dma_sync_single_range_for_device(rx_ring->dev,
1246 bi->dma,
1247 0,
1248 rx_ring->rx_hdr_len,
1249 DMA_FROM_DEVICE);
1250 /* Refresh the desc even if buffer_addrs didn't change
1251 * because each write-back erases this info.
1252 */
1253 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1254 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1255 i++;
1256 if (i == rx_ring->count)
1257 i = 0;
1258 }
1259
1260no_buffers:
1261 if (rx_ring->next_to_use != i)
1262 i40e_release_rx_desc(rx_ring, i);
1263}
1264
1265/**
1266 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1267 * @rx_ring: ring to place buffers on
1268 * @cleaned_count: number of buffers to replace
1269 **/
1270void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001271{
1272 u16 i = rx_ring->next_to_use;
1273 union i40e_rx_desc *rx_desc;
1274 struct i40e_rx_buffer *bi;
1275 struct sk_buff *skb;
1276
1277 /* do nothing if no valid netdev defined */
1278 if (!rx_ring->netdev || !cleaned_count)
1279 return;
1280
1281 while (cleaned_count--) {
1282 rx_desc = I40E_RX_DESC(rx_ring, i);
1283 bi = &rx_ring->rx_bi[i];
1284 skb = bi->skb;
1285
1286 if (!skb) {
1287 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1288 rx_ring->rx_buf_len);
1289 if (!skb) {
Mitch Williams420136c2013-12-18 13:45:59 +00001290 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001291 goto no_buffers;
1292 }
1293 /* initialize queue mapping */
1294 skb_record_rx_queue(skb, rx_ring->queue_index);
1295 bi->skb = skb;
1296 }
1297
1298 if (!bi->dma) {
1299 bi->dma = dma_map_single(rx_ring->dev,
1300 skb->data,
1301 rx_ring->rx_buf_len,
1302 DMA_FROM_DEVICE);
1303 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
Mitch Williams420136c2013-12-18 13:45:59 +00001304 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001305 bi->dma = 0;
1306 goto no_buffers;
1307 }
1308 }
1309
Mitch Williamsa132af22015-01-24 09:58:35 +00001310 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1311 rx_desc->read.hdr_addr = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001312 i++;
1313 if (i == rx_ring->count)
1314 i = 0;
1315 }
1316
1317no_buffers:
1318 if (rx_ring->next_to_use != i)
1319 i40e_release_rx_desc(rx_ring, i);
1320}
1321
1322/**
1323 * i40e_receive_skb - Send a completed packet up the stack
1324 * @rx_ring: rx ring in play
1325 * @skb: packet to send up
1326 * @vlan_tag: vlan tag for packet
1327 **/
1328static void i40e_receive_skb(struct i40e_ring *rx_ring,
1329 struct sk_buff *skb, u16 vlan_tag)
1330{
1331 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1332 struct i40e_vsi *vsi = rx_ring->vsi;
1333 u64 flags = vsi->back->flags;
1334
1335 if (vlan_tag & VLAN_VID_MASK)
1336 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1337
1338 if (flags & I40E_FLAG_IN_NETPOLL)
1339 netif_rx(skb);
1340 else
1341 napi_gro_receive(&q_vector->napi, skb);
1342}
1343
1344/**
1345 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1346 * @vsi: the VSI we care about
1347 * @skb: skb currently being received and modified
1348 * @rx_status: status value of last descriptor in packet
1349 * @rx_error: error value of last descriptor in packet
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001350 * @rx_ptype: ptype value of last descriptor in packet
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001351 **/
1352static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1353 struct sk_buff *skb,
1354 u32 rx_status,
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001355 u32 rx_error,
1356 u16 rx_ptype)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001357{
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001358 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1359 bool ipv4 = false, ipv6 = false;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001360 bool ipv4_tunnel, ipv6_tunnel;
1361 __wsum rx_udp_csum;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001362 struct iphdr *iph;
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001363 __sum16 csum;
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001364
Anjali Singhai Jainf8faaa42015-02-24 06:58:48 +00001365 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1366 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1367 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1368 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001369
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001370 skb->ip_summed = CHECKSUM_NONE;
1371
1372 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001373 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001374 return;
1375
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001376 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001377 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001378 return;
1379
1380 /* both known and outer_ip must be set for the below code to work */
1381 if (!(decoded.known && decoded.outer_ip))
1382 return;
1383
1384 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1385 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1386 ipv4 = true;
1387 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1388 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1389 ipv6 = true;
1390
1391 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001392 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1393 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001394 goto checksum_fail;
1395
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001396 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001397 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001398 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001399 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001400 return;
1401
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001402 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001403 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001404 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001405
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001406 /* handle packets that were not able to be checksummed due
1407 * to arrival speed, in this case the stack can compute
1408 * the csum.
1409 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001410 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001411 return;
1412
1413	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
1414	 * it in the driver; hardware does not do it for us.
1415	 * Since the L3L4P bit was set we assume a valid IHL value (>=5),
1416	 * so the total length of the IPv4 header is IHL*4 bytes.
1417	 * The UDP_0 bit *may* be set if the *inner* header is UDP.
1418 */
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04001419 if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1420 (ipv4_tunnel)) {
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001421 skb->transport_header = skb->mac_header +
1422 sizeof(struct ethhdr) +
1423 (ip_hdr(skb)->ihl * 4);
1424
1425 /* Add 4 bytes for VLAN tagged packets */
1426 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1427 skb->protocol == htons(ETH_P_8021AD))
1428 ? VLAN_HLEN : 0;
1429
Anjali Singhaif6385972014-12-19 02:58:11 +00001430 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1431 (udp_hdr(skb)->check != 0)) {
1432 rx_udp_csum = udp_csum(skb);
1433 iph = ip_hdr(skb);
1434 csum = csum_tcpudp_magic(
1435 iph->saddr, iph->daddr,
1436 (skb->len - skb_transport_offset(skb)),
1437 IPPROTO_UDP, rx_udp_csum);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001438
Anjali Singhaif6385972014-12-19 02:58:11 +00001439 if (udp_hdr(skb)->check != csum)
1440 goto checksum_fail;
1441
1442		} /* else it's GRE and so no outer UDP header */
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001443 }
1444
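	/* Checksum verified; for tunneled frames a csum_level of 1 tells the
	 * stack that the inner checksum was validated as well as the outer.
	 */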
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001445 skb->ip_summed = CHECKSUM_UNNECESSARY;
Tom Herbertfa4ba692014-08-27 21:27:32 -07001446 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001447
1448 return;
1449
1450checksum_fail:
1451 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001452}
1453
1454/**
1455 * i40e_rx_hash - returns the hash value from the Rx descriptor
1456 * @ring: descriptor ring
1457 * @rx_desc: specific descriptor
1458 **/
1459static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1460 union i40e_rx_desc *rx_desc)
1461{
Jesse Brandeburg8a494922013-11-20 10:02:49 +00001462 const __le64 rss_mask =
1463 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1464 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1465
1466 if ((ring->netdev->features & NETIF_F_RXHASH) &&
1467 (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1468 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1469 else
1470 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001471}
1472
1473/**
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001474 * i40e_ptype_to_hash - get a hash type
1475 * @ptype: the ptype value from the descriptor
1476 *
1477 * Returns a hash type to be used by skb_set_hash
1478 **/
1479static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1480{
1481 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1482
1483 if (!decoded.known)
1484 return PKT_HASH_TYPE_NONE;
1485
1486 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1487 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1488 return PKT_HASH_TYPE_L4;
1489 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1490 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1491 return PKT_HASH_TYPE_L3;
1492 else
1493 return PKT_HASH_TYPE_L2;
1494}
1495
1496/**
Mitch Williamsa132af22015-01-24 09:58:35 +00001497 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001498 * @rx_ring: rx ring to clean
1499 * @budget: how many cleans we're allowed
1500 *
1501 * Returns number of packets cleaned
1502 **/
Mitch Williamsa132af22015-01-24 09:58:35 +00001503static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001504{
1505 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1506 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1507 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1508 const int current_node = numa_node_id();
1509 struct i40e_vsi *vsi = rx_ring->vsi;
1510 u16 i = rx_ring->next_to_clean;
1511 union i40e_rx_desc *rx_desc;
1512 u32 rx_error, rx_status;
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001513 u8 rx_ptype;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001514 u64 qword;
1515
Eric W. Biederman390f86d2014-03-14 17:59:10 -07001516 if (budget <= 0)
1517 return 0;
1518
Mitch Williamsa132af22015-01-24 09:58:35 +00001519 do {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001520 struct i40e_rx_buffer *rx_bi;
1521 struct sk_buff *skb;
1522 u16 vlan_tag;
Mitch Williamsa132af22015-01-24 09:58:35 +00001523 /* return some buffers to hardware, one at a time is too slow */
1524 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1525 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1526 cleaned_count = 0;
1527 }
1528
1529 i = rx_ring->next_to_clean;
1530 rx_desc = I40E_RX_DESC(rx_ring, i);
1531 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1532 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1533 I40E_RXD_QW1_STATUS_SHIFT;
1534
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001535 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001536 break;
1537
1538 /* This memory barrier is needed to keep us from reading
1539 * any other fields out of the rx_desc until we know the
1540 * DD bit is set.
1541 */
Alexander Duyck67317162015-04-08 18:49:43 -07001542 dma_rmb();
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001543 if (i40e_rx_is_programming_status(qword)) {
1544 i40e_clean_programming_status(rx_ring, rx_desc);
Mitch Williamsa132af22015-01-24 09:58:35 +00001545 I40E_RX_INCREMENT(rx_ring, i);
1546 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001547 }
1548 rx_bi = &rx_ring->rx_bi[i];
1549 skb = rx_bi->skb;
Mitch Williamsa132af22015-01-24 09:58:35 +00001550 if (likely(!skb)) {
1551 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1552 rx_ring->rx_hdr_len);
Jesse Brandeburg8b6ed9c2015-03-31 00:45:01 -07001553 if (!skb) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001554 rx_ring->rx_stats.alloc_buff_failed++;
Jesse Brandeburg8b6ed9c2015-03-31 00:45:01 -07001555 break;
1556 }
1557
Mitch Williamsa132af22015-01-24 09:58:35 +00001558 /* initialize queue mapping */
1559 skb_record_rx_queue(skb, rx_ring->queue_index);
1560 /* we are reusing so sync this buffer for CPU use */
1561 dma_sync_single_range_for_cpu(rx_ring->dev,
1562 rx_bi->dma,
1563 0,
1564 rx_ring->rx_hdr_len,
1565 DMA_FROM_DEVICE);
1566 }
Mitch Williams829af3a2013-12-18 13:46:00 +00001567 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1568 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1569 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1570 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1571 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1572 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001573
Mitch Williams829af3a2013-12-18 13:46:00 +00001574 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1575 I40E_RXD_QW1_ERROR_SHIFT;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001576 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1577 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001578
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001579 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1580 I40E_RXD_QW1_PTYPE_SHIFT;
Mitch Williamsa132af22015-01-24 09:58:35 +00001581 prefetch(rx_bi->page);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001582 rx_bi->skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001583 cleaned_count++;
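		/* Header split case: the header was DMAed into the dedicated
		 * header buffer, so copy it into the skb first; on header
		 * buffer overflow (HBO) copy the maximum header size instead.
		 */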
1584 if (rx_hbo || rx_sph) {
1585 int len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001586 if (rx_hbo)
1587 len = I40E_RX_HDR_SIZE;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001588 else
Mitch Williamsa132af22015-01-24 09:58:35 +00001589 len = rx_header_len;
1590 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1591 } else if (skb->len == 0) {
1592 int len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001593
Mitch Williamsa132af22015-01-24 09:58:35 +00001594 len = (rx_packet_len > skb_headlen(skb) ?
1595 skb_headlen(skb) : rx_packet_len);
1596 memcpy(__skb_put(skb, len),
1597 rx_bi->page + rx_bi->page_offset,
1598 len);
1599 rx_bi->page_offset += len;
1600 rx_packet_len -= len;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001601 }
1602
1603 /* Get the rest of the data if this was a header split */
Mitch Williamsa132af22015-01-24 09:58:35 +00001604 if (rx_packet_len) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001605 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1606 rx_bi->page,
1607 rx_bi->page_offset,
1608 rx_packet_len);
1609
1610 skb->len += rx_packet_len;
1611 skb->data_len += rx_packet_len;
1612 skb->truesize += rx_packet_len;
1613
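			/* Reuse the half page only if we are the sole owner
			 * and it is local to this NUMA node: take an extra
			 * reference for the ring; otherwise hand the page off
			 * to the skb and allocate a fresh one later.
			 */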
1614 if ((page_count(rx_bi->page) == 1) &&
1615 (page_to_nid(rx_bi->page) == current_node))
1616 get_page(rx_bi->page);
1617 else
1618 rx_bi->page = NULL;
1619
1620 dma_unmap_page(rx_ring->dev,
1621 rx_bi->page_dma,
1622 PAGE_SIZE / 2,
1623 DMA_FROM_DEVICE);
1624 rx_bi->page_dma = 0;
1625 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001626 I40E_RX_INCREMENT(rx_ring, i);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001627
1628 if (unlikely(
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001629 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001630 struct i40e_rx_buffer *next_buffer;
1631
1632 next_buffer = &rx_ring->rx_bi[i];
Mitch Williamsa132af22015-01-24 09:58:35 +00001633 next_buffer->skb = skb;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001634 rx_ring->rx_stats.non_eop_descs++;
Mitch Williamsa132af22015-01-24 09:58:35 +00001635 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001636 }
1637
1638 /* ERR_MASK will only have valid bits if EOP set */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001639 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001640 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001641 continue;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001642 }
1643
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001644 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1645 i40e_ptype_to_hash(rx_ptype));
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00001646 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1647 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1648 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1649 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1650 rx_ring->last_rx_timestamp = jiffies;
1651 }
1652
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001653 /* probably a little skewed due to removing CRC */
1654 total_rx_bytes += skb->len;
1655 total_rx_packets++;
1656
1657 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001658
1659 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1660
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001661 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001662 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1663 : 0;
Vasu Dev38e00432014-08-01 13:27:03 -07001664#ifdef I40E_FCOE
1665 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1666 dev_kfree_skb_any(skb);
Mitch Williamsa132af22015-01-24 09:58:35 +00001667 continue;
Vasu Dev38e00432014-08-01 13:27:03 -07001668 }
1669#endif
Mitch Williamsa132af22015-01-24 09:58:35 +00001670 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001671 i40e_receive_skb(rx_ring, skb, vlan_tag);
1672
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001673 rx_desc->wb.qword1.status_error_len = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001674
Mitch Williamsa132af22015-01-24 09:58:35 +00001675 } while (likely(total_rx_packets < budget));
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001676
Alexander Duyck980e9b12013-09-28 06:01:03 +00001677 u64_stats_update_begin(&rx_ring->syncp);
Alexander Duycka114d0a2013-09-28 06:00:43 +00001678 rx_ring->stats.packets += total_rx_packets;
1679 rx_ring->stats.bytes += total_rx_bytes;
Alexander Duyck980e9b12013-09-28 06:01:03 +00001680 u64_stats_update_end(&rx_ring->syncp);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001681 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1682 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1683
Mitch Williamsa132af22015-01-24 09:58:35 +00001684 return total_rx_packets;
1685}
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001686
Mitch Williamsa132af22015-01-24 09:58:35 +00001687/**
1688 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1689 * @rx_ring: rx ring to clean
1690 * @budget: how many cleans we're allowed
1691 *
1692 * Returns number of packets cleaned
1693 **/
1694static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1695{
1696 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1697 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1698 struct i40e_vsi *vsi = rx_ring->vsi;
1699 union i40e_rx_desc *rx_desc;
1700 u32 rx_error, rx_status;
1701 u16 rx_packet_len;
1702 u8 rx_ptype;
1703 u64 qword;
1704 u16 i;
1705
1706 do {
1707 struct i40e_rx_buffer *rx_bi;
1708 struct sk_buff *skb;
1709 u16 vlan_tag;
1710 /* return some buffers to hardware, one at a time is too slow */
1711 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1712 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1713 cleaned_count = 0;
1714 }
1715
1716 i = rx_ring->next_to_clean;
1717 rx_desc = I40E_RX_DESC(rx_ring, i);
1718 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1719 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1720 I40E_RXD_QW1_STATUS_SHIFT;
1721
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001722 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Mitch Williamsa132af22015-01-24 09:58:35 +00001723 break;
1724
1725 /* This memory barrier is needed to keep us from reading
1726 * any other fields out of the rx_desc until we know the
1727 * DD bit is set.
1728 */
Alexander Duyck67317162015-04-08 18:49:43 -07001729 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001730
1731 if (i40e_rx_is_programming_status(qword)) {
1732 i40e_clean_programming_status(rx_ring, rx_desc);
1733 I40E_RX_INCREMENT(rx_ring, i);
1734 continue;
1735 }
1736 rx_bi = &rx_ring->rx_bi[i];
1737 skb = rx_bi->skb;
1738 prefetch(skb->data);
1739
1740 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1741 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1742
1743 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1744 I40E_RXD_QW1_ERROR_SHIFT;
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001745 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
Mitch Williamsa132af22015-01-24 09:58:35 +00001746
1747 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1748 I40E_RXD_QW1_PTYPE_SHIFT;
1749 rx_bi->skb = NULL;
1750 cleaned_count++;
1751
1752 /* Get the header and possibly the whole packet
1753 * If this is an skb from previous receive dma will be 0
1754 */
1755 skb_put(skb, rx_packet_len);
1756 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1757 DMA_FROM_DEVICE);
1758 rx_bi->dma = 0;
1759
1760 I40E_RX_INCREMENT(rx_ring, i);
1761
1762 if (unlikely(
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001763 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001764 rx_ring->rx_stats.non_eop_descs++;
1765 continue;
1766 }
1767
1768 /* ERR_MASK will only have valid bits if EOP set */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001769 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001770 dev_kfree_skb_any(skb);
1771 /* TODO: shouldn't we increment a counter indicating the
1772 * drop?
1773 */
1774 continue;
1775 }
1776
1777 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1778 i40e_ptype_to_hash(rx_ptype));
1779 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1780 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1781 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1782 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1783 rx_ring->last_rx_timestamp = jiffies;
1784 }
1785
1786 /* probably a little skewed due to removing CRC */
1787 total_rx_bytes += skb->len;
1788 total_rx_packets++;
1789
1790 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1791
1792 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1793
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001794 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
Mitch Williamsa132af22015-01-24 09:58:35 +00001795 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1796 : 0;
1797#ifdef I40E_FCOE
1798 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1799 dev_kfree_skb_any(skb);
1800 continue;
1801 }
1802#endif
1803 i40e_receive_skb(rx_ring, skb, vlan_tag);
1804
Mitch Williamsa132af22015-01-24 09:58:35 +00001805 rx_desc->wb.qword1.status_error_len = 0;
1806 } while (likely(total_rx_packets < budget));
1807
1808 u64_stats_update_begin(&rx_ring->syncp);
1809 rx_ring->stats.packets += total_rx_packets;
1810 rx_ring->stats.bytes += total_rx_bytes;
1811 u64_stats_update_end(&rx_ring->syncp);
1812 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1813 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1814
1815 return total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001816}
1817
1818/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001819 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1820 * @vsi: the VSI we care about
1821 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1822 *
1823 **/
1824static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1825 struct i40e_q_vector *q_vector)
1826{
1827 struct i40e_hw *hw = &vsi->back->hw;
1828 u16 old_itr;
1829 int vector;
1830 u32 val;
1831
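	/* For dynamically moderated rings, recompute the ITR and, when it
	 * changed, program the new interval into PFINT_DYN_CTLN along with
	 * INTENA and CLEARPBA; otherwise re-enable with ITR_NONE so the
	 * current interval is left untouched.
	 */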
1832 vector = (q_vector->v_idx + vsi->base_vector);
1833 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1834 old_itr = q_vector->rx.itr;
1835 i40e_set_new_dynamic_itr(&q_vector->rx);
1836 if (old_itr != q_vector->rx.itr) {
1837 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1838 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1839 (I40E_RX_ITR <<
1840 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1841 (q_vector->rx.itr <<
1842 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1843 } else {
1844 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1845 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1846 (I40E_ITR_NONE <<
1847 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1848 }
1849 if (!test_bit(__I40E_DOWN, &vsi->state))
1850 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
1851 } else {
1852 i40e_irq_dynamic_enable(vsi,
1853 q_vector->v_idx + vsi->base_vector);
1854 }
1855 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1856 old_itr = q_vector->tx.itr;
1857 i40e_set_new_dynamic_itr(&q_vector->tx);
1858 if (old_itr != q_vector->tx.itr) {
1859 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1860 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1861 (I40E_TX_ITR <<
1862 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1863 (q_vector->tx.itr <<
1864 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1865 } else {
1866 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1867 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1868 (I40E_ITR_NONE <<
1869 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1870 }
1871 if (!test_bit(__I40E_DOWN, &vsi->state))
1872 wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
1873 vsi->base_vector - 1), val);
1874 } else {
1875 i40e_irq_dynamic_enable(vsi,
1876 q_vector->v_idx + vsi->base_vector);
1877 }
1878}
1879
1880/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001881 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1882 * @napi: napi struct with our devices info in it
1883 * @budget: amount of work driver is allowed to do this pass, in packets
1884 *
1885 * This function will clean all queues associated with a q_vector.
1886 *
1887 * Returns the amount of work done
1888 **/
1889int i40e_napi_poll(struct napi_struct *napi, int budget)
1890{
1891 struct i40e_q_vector *q_vector =
1892 container_of(napi, struct i40e_q_vector, napi);
1893 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001894 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001895 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001896 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001897 int budget_per_ring;
Mitch Williamsa132af22015-01-24 09:58:35 +00001898 int cleaned;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001899
1900 if (test_bit(__I40E_DOWN, &vsi->state)) {
1901 napi_complete(napi);
1902 return 0;
1903 }
1904
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001905 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001906 * budget and be more aggressive about cleaning up the Tx descriptors.
1907 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001908 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001909 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001910 arm_wb |= ring->arm_wb;
1911 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001912
1913 /* We attempt to distribute budget to each Rx queue fairly, but don't
1914 * allow the budget to go below 1 because that would exit polling early.
1915 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001916 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00001917
Mitch Williamsa132af22015-01-24 09:58:35 +00001918 i40e_for_each_ring(ring, q_vector->rx) {
1919 if (ring_is_ps_enabled(ring))
1920 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1921 else
1922 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1923 /* if we didn't clean as many as budgeted, we must be done */
1924 clean_complete &= (budget_per_ring != cleaned);
1925 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001926
1927 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001928 if (!clean_complete) {
1929 if (arm_wb)
1930 i40e_force_wb(vsi, q_vector);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001931 return budget;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00001932 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001933
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04001934 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1935 q_vector->arm_wb_state = false;
1936
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001937 /* Work is done so exit the polling mode and re-enable the interrupt */
1938 napi_complete(napi);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001939 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1940 i40e_update_enable_itr(vsi, q_vector);
1941 } else { /* Legacy mode */
1942 struct i40e_hw *hw = &vsi->back->hw;
1943 /* We re-enable the queue 0 cause, but
1944 * don't worry about dynamic_enable
1945 * because we left it on for the other
1946 * possible interrupts during napi
1947 */
1948 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1949 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001950
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001951 wr32(hw, I40E_QINT_RQCTL(0), qval);
1952 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1953 I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1954 wr32(hw, I40E_QINT_TQCTL(0), qval);
1955 i40e_irq_dynamic_enable_icr0(vsi->back);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001956 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001957 return 0;
1958}
1959
1960/**
1961 * i40e_atr - Add a Flow Director ATR filter
1962 * @tx_ring: ring to add programming descriptor to
1963 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001964 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001965 * @protocol: wire protocol
1966 **/
1967static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001968 u32 tx_flags, __be16 protocol)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001969{
1970 struct i40e_filter_program_desc *fdir_desc;
1971 struct i40e_pf *pf = tx_ring->vsi->back;
1972 union {
1973 unsigned char *network;
1974 struct iphdr *ipv4;
1975 struct ipv6hdr *ipv6;
1976 } hdr;
1977 struct tcphdr *th;
1978 unsigned int hlen;
1979 u32 flex_ptype, dtype_cmd;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00001980 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001981
1982 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08001983 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001984 return;
1985
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00001986 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1987 return;
1988
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001989 /* if sampling is disabled do nothing */
1990 if (!tx_ring->atr_sample_rate)
1991 return;
1992
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001993 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001994 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001995
1996 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
1997 /* snag network header to get L4 type and address */
1998 hdr.network = skb_network_header(skb);
1999
2000		/* Currently only IPv4/IPv6 with TCP is supported;
2001 * access ihl as u8 to avoid unaligned access on ia64
2002 */
2003 if (tx_flags & I40E_TX_FLAGS_IPV4)
2004 hlen = (hdr.network[0] & 0x0F) << 2;
2005 else if (protocol == htons(ETH_P_IPV6))
2006 hlen = sizeof(struct ipv6hdr);
2007 else
2008 return;
2009 } else {
2010 hdr.network = skb_inner_network_header(skb);
2011 hlen = skb_inner_network_header_len(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002012 }
2013
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002014 /* Currently only IPv4/IPv6 with TCP is supported
2015 * Note: tx_flags gets modified to reflect inner protocols in
2016	 * the i40e_tx_enable_csum() function if encap is enabled.
2017 */
2018 if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
2019 (hdr.ipv4->protocol != IPPROTO_TCP))
2020 return;
2021 else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
2022 (hdr.ipv6->nexthdr != IPPROTO_TCP))
2023 return;
2024
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002025 th = (struct tcphdr *)(hdr.network + hlen);
2026
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002027 /* Due to lack of space, no more new filters can be programmed */
2028 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2029 return;
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002030 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
2031 /* HW ATR eviction will take care of removing filters on FIN
2032 * and RST packets.
2033 */
2034 if (th->fin || th->rst)
2035 return;
2036 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002037
2038 tx_ring->atr_count++;
2039
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002040 /* sample on all syn/fin/rst packets or once every atr sample rate */
2041 if (!th->fin &&
2042 !th->syn &&
2043 !th->rst &&
2044 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002045 return;
2046
2047 tx_ring->atr_count = 0;
2048
2049 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002050 i = tx_ring->next_to_use;
2051 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2052
2053 i++;
2054 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002055
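	/* Build the filter programming descriptor: queue index, PCTYPE and
	 * destination VSI are encoded in flex_ptype; the add/remove command,
	 * destination, FD status reporting and counter index go in dtype_cmd.
	 */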
2056 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2057 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2058 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
2059 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2060 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2061 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2062 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2063
2064 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2065
2066 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2067
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002068 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002069 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2070 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2071 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2072 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2073
2074 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2075 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2076
2077 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2078 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2079
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002080 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002081 if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2082 dtype_cmd |=
2083 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2084 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2085 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2086 else
2087 dtype_cmd |=
2088 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2089 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2090 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002091
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002092 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2093 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2094
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002095 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002096 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002097 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002098 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002099}
2100
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002101/**
2102 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2103 * @skb: send buffer
2104 * @tx_ring: ring to send buffer on
2105 * @flags: the tx flags to be set
2106 *
2107 * Checks the skb and set up correspondingly several generic transmit flags
2108 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2109 *
2110 * Returns an error code to indicate the frame should be dropped upon error,
2111 * otherwise returns 0 to indicate the flags have been set properly.
2112 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002113#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002114inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002115 struct i40e_ring *tx_ring,
2116 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002117#else
2118static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2119 struct i40e_ring *tx_ring,
2120 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002121#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002122{
2123 __be16 protocol = skb->protocol;
2124 u32 tx_flags = 0;
2125
Greg Rose31eaacc2015-03-31 00:45:03 -07002126 if (protocol == htons(ETH_P_8021Q) &&
2127 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2128 /* When HW VLAN acceleration is turned off by the user the
2129 * stack sets the protocol to 8021q so that the driver
2130 * can take any steps required to support the SW only
2131 * VLAN handling. In our case the driver doesn't need
2132 * to take any further steps so just set the protocol
2133 * to the encapsulated ethertype.
2134 */
2135 skb->protocol = vlan_get_protocol(skb);
2136 goto out;
2137 }
2138
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002139 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002140 if (skb_vlan_tag_present(skb)) {
2141 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002142 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2143 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002144 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002145 struct vlan_hdr *vhdr, _vhdr;
2146 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2147 if (!vhdr)
2148 return -EINVAL;
2149
2150 protocol = vhdr->h_vlan_encapsulated_proto;
2151 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2152 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2153 }
2154
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002155 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2156 goto out;
2157
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002158 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002159 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2160 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002161 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2162 tx_flags |= (skb->priority & 0x7) <<
2163 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2164 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2165 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002166 int rc;
2167
2168 rc = skb_cow_head(skb, 0);
2169 if (rc < 0)
2170 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002171 vhdr = (struct vlan_ethhdr *)skb->data;
2172 vhdr->h_vlan_TCI = htons(tx_flags >>
2173 I40E_TX_FLAGS_VLAN_SHIFT);
2174 } else {
2175 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2176 }
2177 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002178
2179out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002180 *flags = tx_flags;
2181 return 0;
2182}
2183
2184/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002185 * i40e_tso - set up the tso context descriptor
2186 * @tx_ring: ptr to the ring to send
2187 * @skb: ptr to the skb we're sending
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002188 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the Tx context descriptor
2189 * @cd_tunneling: ptr to context descriptor bits
2190 *
2191 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2192 **/
2193static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002194 u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2195 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002196{
2197 u32 cd_cmd, cd_tso_len, cd_mss;
Francois Romieudd225bc2014-03-30 03:14:48 +00002198 struct ipv6hdr *ipv6h;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002199 struct tcphdr *tcph;
2200 struct iphdr *iph;
2201 u32 l4len;
2202 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002203
2204 if (!skb_is_gso(skb))
2205 return 0;
2206
Francois Romieudd225bc2014-03-30 03:14:48 +00002207 err = skb_cow_head(skb, 0);
2208 if (err < 0)
2209 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002210
Anjali Singhaidf230752014-12-19 02:58:16 +00002211 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2212 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2213
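	/* For TSO the hardware rewrites the length and checksum fields of
	 * every segment, so zero the IP length/checksum and seed the TCP
	 * checksum with a pseudo-header checksum computed over a zero length.
	 */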
2214 if (iph->version == 4) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002215 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2216 iph->tot_len = 0;
2217 iph->check = 0;
2218 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2219 0, IPPROTO_TCP, 0);
Anjali Singhaidf230752014-12-19 02:58:16 +00002220 } else if (ipv6h->version == 6) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002221 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2222 ipv6h->payload_len = 0;
2223 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2224 0, IPPROTO_TCP, 0);
2225 }
2226
2227 l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2228 *hdr_len = (skb->encapsulation
2229 ? (skb_inner_transport_header(skb) - skb->data)
2230 : skb_transport_offset(skb)) + l4len;
2231
2232 /* find the field values */
2233 cd_cmd = I40E_TX_CTX_DESC_TSO;
2234 cd_tso_len = skb->len - *hdr_len;
2235 cd_mss = skb_shinfo(skb)->gso_size;
Mitch Williams829af3a2013-12-18 13:46:00 +00002236 *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2237 ((u64)cd_tso_len <<
2238 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2239 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002240 return 1;
2241}
2242
2243/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002244 * i40e_tsyn - set up the tsyn context descriptor
2245 * @tx_ring: ptr to the ring to send
2246 * @skb: ptr to the skb we're sending
2247 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the Tx context descriptor
2248 *
2249 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2250 **/
2251static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2252 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2253{
2254 struct i40e_pf *pf;
2255
2256 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2257 return 0;
2258
2259 /* Tx timestamps cannot be sampled when doing TSO */
2260 if (tx_flags & I40E_TX_FLAGS_TSO)
2261 return 0;
2262
2263 /* only timestamp the outbound packet if the user has requested it and
2264 * we are not already transmitting a packet to be timestamped
2265 */
2266 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002267 if (!(pf->flags & I40E_FLAG_PTP))
2268 return 0;
2269
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002270 if (pf->ptp_tx &&
2271 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002272 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2273 pf->ptp_tx_skb = skb_get(skb);
2274 } else {
2275 return 0;
2276 }
2277
2278 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2279 I40E_TXD_CTX_QW1_CMD_SHIFT;
2280
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002281 return 1;
2282}
2283
2284/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002285 * i40e_tx_enable_csum - Enable Tx checksum offloads
2286 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002287 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002288 * @td_cmd: Tx descriptor command bits to set
2289 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
2290 * @cd_tunneling: ptr to context desc bits
2291 **/
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002292static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002293 u32 *td_cmd, u32 *td_offset,
2294 struct i40e_ring *tx_ring,
2295 u32 *cd_tunneling)
2296{
2297 struct ipv6hdr *this_ipv6_hdr;
2298 unsigned int this_tcp_hdrlen;
2299 struct iphdr *this_ip_hdr;
2300 u32 network_hdr_len;
2301 u8 l4_hdr = 0;
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04002302 struct udphdr *oudph;
2303 struct iphdr *oiph;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002304 u32 l4_tunnel = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002305
2306 if (skb->encapsulation) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002307 switch (ip_hdr(skb)->protocol) {
2308 case IPPROTO_UDP:
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04002309 oudph = udp_hdr(skb);
2310 oiph = ip_hdr(skb);
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002311 l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002312 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002313 break;
2314 default:
2315 return;
2316 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002317 network_hdr_len = skb_inner_network_header_len(skb);
2318 this_ip_hdr = inner_ip_hdr(skb);
2319 this_ipv6_hdr = inner_ipv6_hdr(skb);
2320 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2321
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002322 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2323 if (*tx_flags & I40E_TX_FLAGS_TSO) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002324 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
2325 ip_hdr(skb)->check = 0;
2326 } else {
2327 *cd_tunneling |=
2328 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2329 }
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002330 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Anjali Singhaidf230752014-12-19 02:58:16 +00002331 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002332 if (*tx_flags & I40E_TX_FLAGS_TSO)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002333 ip_hdr(skb)->check = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002334 }
2335
2336 /* Now set the ctx descriptor fields */
2337 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002338 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
2339 l4_tunnel |
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002340 ((skb_inner_network_offset(skb) -
2341 skb_transport_offset(skb)) >> 1) <<
2342 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
Anjali Singhaidf230752014-12-19 02:58:16 +00002343 if (this_ip_hdr->version == 6) {
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002344 *tx_flags &= ~I40E_TX_FLAGS_IPV4;
2345 *tx_flags |= I40E_TX_FLAGS_IPV6;
Anjali Singhaidf230752014-12-19 02:58:16 +00002346 }
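		/* If the hardware can checksum the outer UDP header, seed it
		 * with the pseudo-header checksum and set L4T_CS so the
		 * device completes the outer checksum on transmit.
		 */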
Anjali Singhai Jain527274c2015-06-05 12:20:31 -04002347 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2348 (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
2349 (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
2350 oudph->check = ~csum_tcpudp_magic(oiph->saddr,
2351 oiph->daddr,
2352 (skb->len - skb_transport_offset(skb)),
2353 IPPROTO_UDP, 0);
2354 *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2355 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002356 } else {
2357 network_hdr_len = skb_network_header_len(skb);
2358 this_ip_hdr = ip_hdr(skb);
2359 this_ipv6_hdr = ipv6_hdr(skb);
2360 this_tcp_hdrlen = tcp_hdrlen(skb);
2361 }
2362
2363 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002364 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002365 l4_hdr = this_ip_hdr->protocol;
2366 /* the stack computes the IP header already, the only time we
2367 * need the hardware to recompute it is in the case of TSO.
2368 */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002369 if (*tx_flags & I40E_TX_FLAGS_TSO) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002370 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2371 this_ip_hdr->check = 0;
2372 } else {
2373 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
2374 }
2375 /* Now set the td_offset for IP header length */
2376 *td_offset = (network_hdr_len >> 2) <<
2377 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002378 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002379 l4_hdr = this_ipv6_hdr->nexthdr;
2380 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2381 /* Now set the td_offset for IP header length */
2382 *td_offset = (network_hdr_len >> 2) <<
2383 I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2384 }
2385 /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2386 *td_offset |= (skb_network_offset(skb) >> 1) <<
2387 I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2388
2389 /* Enable L4 checksum offloads */
2390 switch (l4_hdr) {
2391 case IPPROTO_TCP:
2392 /* enable checksum offloads */
2393 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2394 *td_offset |= (this_tcp_hdrlen >> 2) <<
2395 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2396 break;
2397 case IPPROTO_SCTP:
2398 /* enable SCTP checksum offload */
2399 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2400 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
2401 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2402 break;
2403 case IPPROTO_UDP:
2404 /* enable UDP checksum offload */
2405 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2406 *td_offset |= (sizeof(struct udphdr) >> 2) <<
2407 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2408 break;
2409 default:
2410 break;
2411 }
2412}
2413
2414/**
2415 * i40e_create_tx_ctx - Build the Tx context descriptor
2416 * @tx_ring: ring to create the descriptor on
2417 * @cd_type_cmd_tso_mss: Quad Word 1
2418 * @cd_tunneling: Quad Word 0 - bits 0-31
2419 * @cd_l2tag2: Quad Word 0 - bits 32-63
2420 **/
2421static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2422 const u64 cd_type_cmd_tso_mss,
2423 const u32 cd_tunneling, const u32 cd_l2tag2)
2424{
2425 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002426 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002427
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002428 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2429 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002430 return;
2431
2432 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002433 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2434
2435 i++;
2436 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002437
2438 /* cpu_to_le32 and assign to struct fields */
2439 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2440 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002441 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002442 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2443}
2444
2445/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002446 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2447 * @tx_ring: the ring to be checked
2448 * @size: the size buffer we want to assure is available
2449 *
2450 * Returns -EBUSY if a stop is needed, else 0
2451 **/
2452static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2453{
2454 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2455 /* Memory barrier before checking head and tail */
2456 smp_mb();
2457
2458 /* Check again in a case another CPU has just made room available. */
2459 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2460 return -EBUSY;
2461
2462 /* A reprieve! - use start_queue because it doesn't call schedule */
2463 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2464 ++tx_ring->tx_stats.restart_queue;
2465 return 0;
2466}
2467
2468/**
2469 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2470 * @tx_ring: the ring to be checked
2471 * @size: the size buffer we want to assure is available
2472 *
2473 * Returns 0 if stop is not needed
2474 **/
2475#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002476inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002477#else
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002478static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002479#endif
2480{
2481 if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2482 return 0;
2483 return __i40e_maybe_stop_tx(tx_ring, size);
2484}
2485
2486/**
Anjali Singhai71da6192015-02-21 06:42:35 +00002487 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2488 * @skb: send buffer
2489 * @tx_flags: collected send information
Anjali Singhai71da6192015-02-21 06:42:35 +00002490 *
2491 * Note: Our HW can't scatter-gather more than 8 fragments to build
2492 * a packet on the wire and so we need to figure out the cases where we
2493 * need to linearize the skb.
2494 **/
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002495static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
Anjali Singhai71da6192015-02-21 06:42:35 +00002496{
2497 struct skb_frag_struct *frag;
2498 bool linearize = false;
2499 unsigned int size = 0;
2500 u16 num_frags;
2501 u16 gso_segs;
2502
2503 num_frags = skb_shinfo(skb)->nr_frags;
2504 gso_segs = skb_shinfo(skb)->gso_segs;
2505
2506 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002507 u16 j = 0;
Anjali Singhai71da6192015-02-21 06:42:35 +00002508
2509 if (num_frags < (I40E_MAX_BUFFER_TXD))
2510 goto linearize_chk_done;
2511 /* try the simple math, if we have too many frags per segment */
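		/* e.g. 18 frags over 2 gso segments gives
		 * DIV_ROUND_UP(20, 2) = 10 frags per segment, which is more
		 * than the 8 the hardware can chain, so linearize.
		 */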
2512 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2513 I40E_MAX_BUFFER_TXD) {
2514 linearize = true;
2515 goto linearize_chk_done;
2516 }
2517 frag = &skb_shinfo(skb)->frags[0];
Anjali Singhai71da6192015-02-21 06:42:35 +00002518 /* we might still have more fragments per segment */
2519 do {
2520 size += skb_frag_size(frag);
2521 frag++; j++;
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002522 if ((size >= skb_shinfo(skb)->gso_size) &&
2523 (j < I40E_MAX_BUFFER_TXD)) {
2524 size = (size % skb_shinfo(skb)->gso_size);
2525 j = (size) ? 1 : 0;
2526 }
Anjali Singhai71da6192015-02-21 06:42:35 +00002527 if (j == I40E_MAX_BUFFER_TXD) {
Anjali Singhai Jain30520832015-05-08 15:35:52 -07002528 linearize = true;
2529 break;
Anjali Singhai71da6192015-02-21 06:42:35 +00002530 }
2531 num_frags--;
2532 } while (num_frags);
2533 } else {
2534 if (num_frags >= I40E_MAX_BUFFER_TXD)
2535 linearize = true;
2536 }
2537
2538linearize_chk_done:
2539 return linearize;
2540}
2541
2542/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002543 * i40e_tx_map - Build the Tx descriptor
2544 * @tx_ring: ring to send buffer on
2545 * @skb: send buffer
2546 * @first: first buffer info buffer to use
2547 * @tx_flags: collected send information
2548 * @hdr_len: size of the packet header
2549 * @td_cmd: the command field in the descriptor
2550 * @td_offset: offset for checksum or crc
2551 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002552#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002553inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002554 struct i40e_tx_buffer *first, u32 tx_flags,
2555 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002556#else
2557static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2558 struct i40e_tx_buffer *first, u32 tx_flags,
2559 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002560#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002561{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002562 unsigned int data_len = skb->data_len;
2563 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002564 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002565 struct i40e_tx_buffer *tx_bi;
2566 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002567 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002568 u32 td_tag = 0;
2569 dma_addr_t dma;
2570 u16 gso_segs;
2571
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002572 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2573 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2574 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2575 I40E_TX_FLAGS_VLAN_SHIFT;
2576 }
2577
Alexander Duycka5e9c572013-09-28 06:00:27 +00002578 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2579 gso_segs = skb_shinfo(skb)->gso_segs;
2580 else
2581 gso_segs = 1;
2582
2583 /* multiply data chunks by size of headers */
2584 first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2585 first->gso_segs = gso_segs;
2586 first->skb = skb;
2587 first->tx_flags = tx_flags;
2588
2589 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2590
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002591 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002592 tx_bi = first;
2593
2594 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2595 if (dma_mapping_error(tx_ring->dev, dma))
2596 goto dma_error;
2597
2598 /* record length, and DMA address */
2599 dma_unmap_len_set(tx_bi, len, size);
2600 dma_unmap_addr_set(tx_bi, dma, dma);
2601
2602 tx_desc->buffer_addr = cpu_to_le64(dma);
2603
2604 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002605 tx_desc->cmd_type_offset_bsz =
2606 build_ctob(td_cmd, td_offset,
2607 I40E_MAX_DATA_PER_TXD, td_tag);
2608
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002609 tx_desc++;
2610 i++;
2611 if (i == tx_ring->count) {
2612 tx_desc = I40E_TX_DESC(tx_ring, 0);
2613 i = 0;
2614 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002615
2616 dma += I40E_MAX_DATA_PER_TXD;
2617 size -= I40E_MAX_DATA_PER_TXD;
2618
2619 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002620 }
2621
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002622 if (likely(!data_len))
2623 break;
2624
Alexander Duycka5e9c572013-09-28 06:00:27 +00002625 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2626 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002627
2628 tx_desc++;
2629 i++;
2630 if (i == tx_ring->count) {
2631 tx_desc = I40E_TX_DESC(tx_ring, 0);
2632 i = 0;
2633 }
2634
Alexander Duycka5e9c572013-09-28 06:00:27 +00002635 size = skb_frag_size(frag);
2636 data_len -= size;
2637
2638 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2639 DMA_TO_DEVICE);
2640
2641 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002642 }
2643
Jesse Brandeburg1943d8b2014-02-14 02:14:40 +00002644 /* Place RS bit on last descriptor of any packet that spans across the
2645 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
2646 */
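	/* In other words: if the packet sits entirely inside one write-back
	 * stride and does not end on the stride boundary, set EOP only and
	 * let a later descriptor carry the RS bit; otherwise request EOP
	 * and RS here.
	 */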
        if (((i & WB_STRIDE) != WB_STRIDE) &&
            (first <= &tx_ring->tx_bi[i]) &&
            (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
                tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag) |
                        cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
                                    I40E_TXD_QW1_CMD_SHIFT);
        } else {
                tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag) |
                        cpu_to_le64((u64)I40E_TXD_CMD <<
                                    I40E_TXD_QW1_CMD_SHIFT);
        }

        netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                 tx_ring->queue_index),
                             first->bytecount);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();

        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;

        i++;
        if (i == tx_ring->count)
                i = 0;

        tx_ring->next_to_use = i;

        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
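        /* With skb->xmit_more the stack is signalling that more frames will
         * follow on this queue, so the tail (doorbell) write is deferred and
         * a single MMIO write later covers the whole batch; the write is
         * still issued immediately if the queue has just been stopped.
         */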
        /* notify HW of packet */
        if (!skb->xmit_more ||
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(i, tx_ring->tail);
        else
                prefetchw(tx_desc + 1);

        return;

dma_error:
        dev_info(tx_ring->dev, "TX DMA map failed\n");

        /* clear dma mappings for failed tx_bi map */
        for (;;) {
                tx_bi = &tx_ring->tx_bi[i];
                i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
                if (tx_bi == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        tx_ring->next_to_use = i;
}

/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns the number of data descriptors needed for this skb, or 0 when
 * there are not enough descriptors available on this ring, since at least
 * one descriptor is required.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                             struct i40e_ring *tx_ring)
#endif
{
        unsigned int f;
        int count = 0;

        /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
         * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
         * + 4 desc gap to avoid the cache line where head is,
         * + 1 desc for context descriptor,
         * otherwise try next time
         */
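        /* Worked example (illustrative): a non-TSO skb with a 1500-byte
         * linear head and no page fragments needs a single data descriptor,
         * so the ring must have 1 + 4 + 1 = 6 free slots or the transmit is
         * deferred and tx_busy is counted.  (This assumes the head fits in
         * one descriptor, i.e. I40E_MAX_DATA_PER_TXD exceeds the frame size,
         * which holds for any ordinary MTU-sized frame.)
         */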
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

        count += TXD_USE_COUNT(skb_headlen(skb));
        if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
        return count;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                                        struct i40e_ring *tx_ring)
{
        u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
        u32 cd_tunneling = 0, cd_l2tag2 = 0;
        struct i40e_tx_buffer *first;
        u32 td_offset = 0;
        u32 tx_flags = 0;
        __be16 protocol;
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tsyn;
        int tso;

        if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
                return NETDEV_TX_BUSY;

        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;

        /* obtain protocol of skb */
        protocol = vlan_get_protocol(skb);

        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];

        /* setup IPv4/IPv6 offloads */
        if (protocol == htons(ETH_P_IP))
                tx_flags |= I40E_TX_FLAGS_IPV4;
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;

        tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);

        if (tso < 0)
                goto out_drop;
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;

        tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;

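        /* The hardware can only chain a limited number of data descriptors
         * per frame (or per TSO segment); i40e_chk_linearize() flags skbs
         * whose fragment layout would exceed that limit so they can be
         * copied into a single linear buffer before being mapped.
         */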
        if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;

        skb_tx_timestamp(skb);

        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;

        /* Always offload the checksum, since it's in the data descriptor */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;

                i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }

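        /* The accumulated TSO/MSS, tunnelling and L2 tag 2 values are
         * emitted in a separate context descriptor ahead of the data
         * descriptors; i40e_create_tx_ctx() only writes that descriptor
         * when at least one of those fields is actually in use.
         */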
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);

        /* Add Flow Director ATR if it's enabled.
         *
         * NOTE: this must always be directly before the data descriptor.
         */
        i40e_atr(tx_ring, skb, tx_flags, protocol);

        i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                    td_cmd, td_offset);

        return NETDEV_TX_OK;

out_drop:
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
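        /* Note: skb_put_padto() frees the skb itself when padding fails, so
         * returning NETDEV_TX_OK is still correct; the buffer has already
         * been consumed and must not be requeued.
         */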
        if (skb_put_padto(skb, I40E_MIN_TX_LEN))
                return NETDEV_TX_OK;

        return i40e_xmit_frame_ring(skb, tx_ring);
}