/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

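/**
 * build_ctob - build the Tx data descriptor cmd_type_offset_bsz quadword
 * @td_cmd: Tx descriptor command bits
 * @td_offset: combined header length offsets field
 * @size: size of the data buffer in bytes
 * @td_tag: L2TAG1 (VLAN tag) value
 *
 * Packs the DATA descriptor type together with the command, offset,
 * buffer size and L2 tag fields into the little-endian quadword layout
 * used by the hardware Tx data descriptor.
 **/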
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

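/* Default command bits for the last descriptor of a frame:
 * End Of Packet plus Report Status.
 */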
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

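/* Number of 1 msec sleeps i40e_program_fdir_filter() will wait for the
 * Tx ring to free up the two descriptors a filter programming request needs.
 */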
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

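/* The dummy packets below start with a 14 byte Ethernet header, so the
 * IP header always begins at offset 14; each *_DUMMY_PACKET_LEN is the
 * length of the corresponding pre-built L2/L3/L4 header template.
 */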
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp4_filter_cnt--;
		if (pf->fd_tcp4_filter_cnt == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	return 0;
}

#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or remove
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			goto unsupported_flow;
		}
		break;
	default:
unsupported_flow:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->hw_disabled_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->hw_disabled_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

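/* When a ring runs in WB_ON_ITR mode, i40e_clean_tx_irq() arms a
 * descriptor write-back once fewer than WB_STRIDE descriptors remain
 * pending, so completions are not left waiting while we stay in NAPI.
 */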
#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttle rate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      I40E_RXBUFFER_2048,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 PAGE_SIZE,
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	bi->pagecnt_bias = 1;

	return true;
}

1307/**
1308 * i40e_receive_skb - Send a completed packet up the stack
1309 * @rx_ring: rx ring in play
1310 * @skb: packet to send up
1311 * @vlan_tag: vlan tag for packet
1312 **/
1313static void i40e_receive_skb(struct i40e_ring *rx_ring,
1314 struct sk_buff *skb, u16 vlan_tag)
1315{
1316 struct i40e_q_vector *q_vector = rx_ring->q_vector;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001317
Jesse Brandeburga149f2c2016-04-12 08:30:49 -07001318 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1319 (vlan_tag & VLAN_VID_MASK))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001320 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1321
Alexander Duyck8b650352015-09-24 09:04:32 -07001322 napi_gro_receive(&q_vector->napi, skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001323}
1324
1325/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001326 * i40e_alloc_rx_buffers - Replace used receive buffers
1327 * @rx_ring: ring to place buffers on
1328 * @cleaned_count: number of buffers to replace
1329 *
1330 * Returns false if all allocations were successful, true if any fail
1331 **/
1332bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1333{
1334 u16 ntu = rx_ring->next_to_use;
1335 union i40e_rx_desc *rx_desc;
1336 struct i40e_rx_buffer *bi;
1337
1338 /* do nothing if no valid netdev defined */
1339 if (!rx_ring->netdev || !cleaned_count)
1340 return false;
1341
1342 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1343 bi = &rx_ring->rx_bi[ntu];
1344
1345 do {
1346 if (!i40e_alloc_mapped_page(rx_ring, bi))
1347 goto no_buffers;
1348
Alexander Duyck59605bc2017-01-30 12:29:35 -08001349 /* sync the buffer for use by the device */
1350 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1351 bi->page_offset,
1352 I40E_RXBUFFER_2048,
1353 DMA_FROM_DEVICE);
1354
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001355 /* Refresh the desc even if buffer_addrs didn't change
1356 * because each write-back erases this info.
1357 */
1358 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001359
1360 rx_desc++;
1361 bi++;
1362 ntu++;
1363 if (unlikely(ntu == rx_ring->count)) {
1364 rx_desc = I40E_RX_DESC(rx_ring, 0);
1365 bi = rx_ring->rx_bi;
1366 ntu = 0;
1367 }
1368
1369 /* clear the status bits for the next_to_use descriptor */
1370 rx_desc->wb.qword1.status_error_len = 0;
1371
1372 cleaned_count--;
1373 } while (cleaned_count);
1374
1375 if (rx_ring->next_to_use != ntu)
1376 i40e_release_rx_desc(rx_ring, ntu);
1377
1378 return false;
1379
1380no_buffers:
1381 if (rx_ring->next_to_use != ntu)
1382 i40e_release_rx_desc(rx_ring, ntu);
1383
1384 /* make sure to come back via polling to try again after
1385 * allocation failure
1386 */
1387 return true;
1388}
1389
1390/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001391 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1392 * @vsi: the VSI we care about
1393 * @skb: skb currently being received and modified
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001394 * @rx_desc: the receive descriptor
1395 *
1396 * skb->protocol must be set before this function is called
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001397 **/
1398static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1399 struct sk_buff *skb,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001400 union i40e_rx_desc *rx_desc)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001401{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001402 struct i40e_rx_ptype_decoded decoded;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001403 u32 rx_error, rx_status;
Alexander Duyck858296c82016-06-14 15:45:42 -07001404 bool ipv4, ipv6;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001405 u8 ptype;
1406 u64 qword;
1407
1408 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1409 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1410 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1411 I40E_RXD_QW1_ERROR_SHIFT;
1412 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1413 I40E_RXD_QW1_STATUS_SHIFT;
1414 decoded = decode_rx_desc_ptype(ptype);
Joseph Gasparakis8144f0f2013-12-28 05:27:57 +00001415
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001416 skb->ip_summed = CHECKSUM_NONE;
1417
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001418 skb_checksum_none_assert(skb);
1419
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001420 /* Rx csum enabled and ip headers found? */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001421 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001422 return;
1423
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001424 /* did the hardware decode the packet and checksum? */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001425 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001426 return;
1427
1428 /* both known and outer_ip must be set for the below code to work */
1429 if (!(decoded.known && decoded.outer_ip))
1430 return;
1431
Alexander Duyckfad57332016-01-24 21:17:22 -08001432 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1433 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1434 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1435 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001436
1437 if (ipv4 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001438 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1439 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001440 goto checksum_fail;
1441
Jesse Brandeburgddf1d0d2014-02-13 03:48:39 -08001442 /* likely incorrect csum if alternate IP extension headers found */
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001443 if (ipv6 &&
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001444 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001445 /* don't increment checksum err here, non-fatal err */
Shannon Nelson8ee75a82013-12-21 05:44:46 +00001446 return;
1447
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001448 /* there was some L4 error, count error and punt packet to the stack */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001449 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001450 goto checksum_fail;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001451
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001452 /* handle packets that were not able to be checksummed due
1453 * to arrival speed, in this case the stack can compute
1454 * the csum.
1455 */
Jesse Brandeburg41a1d042015-06-04 16:24:02 -04001456 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001457 return;
1458
Alexander Duyck858296c82016-06-14 15:45:42 -07001459 /* If there is an outer header present that might contain a checksum
1460 * we need to bump the checksum level by 1 to reflect the fact that
1461 * we are indicating we validated the inner checksum.
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001462 */
Alexander Duyck858296c82016-06-14 15:45:42 -07001463 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1464 skb->csum_level = 1;
Alexander Duyckfad57332016-01-24 21:17:22 -08001465
Alexander Duyck858296c82016-06-14 15:45:42 -07001466 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1467 switch (decoded.inner_prot) {
1468 case I40E_RX_PTYPE_INNER_PROT_TCP:
1469 case I40E_RX_PTYPE_INNER_PROT_UDP:
1470 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1471 skb->ip_summed = CHECKSUM_UNNECESSARY;
 1472		/* fall through */
1473 default:
1474 break;
1475 }
Jesse Brandeburg8a3c91c2014-05-20 08:01:43 +00001476
1477 return;
1478
1479checksum_fail:
1480 vsi->back->hw_csum_rx_error++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001481}
1482
1483/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001484 * i40e_ptype_to_htype - get a hash type
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001485 * @ptype: the ptype value from the descriptor
1486 *
1487 * Returns a hash type to be used by skb_set_hash
1488 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001489static inline int i40e_ptype_to_htype(u8 ptype)
Jesse Brandeburg206812b2014-02-12 01:45:33 +00001490{
1491 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1492
1493 if (!decoded.known)
1494 return PKT_HASH_TYPE_NONE;
1495
1496 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1497 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1498 return PKT_HASH_TYPE_L4;
1499 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1500 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1501 return PKT_HASH_TYPE_L3;
1502 else
1503 return PKT_HASH_TYPE_L2;
1504}
1505
1506/**
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001507 * i40e_rx_hash - set the hash value in the skb
1508 * @ring: descriptor ring
 1509 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
 1510 **/
1511static inline void i40e_rx_hash(struct i40e_ring *ring,
1512 union i40e_rx_desc *rx_desc,
1513 struct sk_buff *skb,
1514 u8 rx_ptype)
1515{
1516 u32 hash;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001517 const __le64 rss_mask =
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001518 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1519 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1520
Mitch Williamsa876c3b2016-05-03 15:13:18 -07001521 if (!(ring->netdev->features & NETIF_F_RXHASH))
Anjali Singhai Jain857942f2015-12-09 15:50:21 -08001522 return;
1523
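	/* the descriptor carries a valid RSS hash only when the FLTSTAT
	 * field reports the RSS hash state
	 */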
1524 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1525 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1526 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1527 }
1528}
1529
1530/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001531 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1532 * @rx_ring: rx descriptor ring packet is being transacted on
1533 * @rx_desc: pointer to the EOP Rx descriptor
1534 * @skb: pointer to current skb being populated
1535 * @rx_ptype: the packet type decoded by hardware
Mitch Williamsa132af22015-01-24 09:58:35 +00001536 *
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001537 * This function checks the ring, descriptor, and packet information in
1538 * order to populate the hash, checksum, VLAN, protocol, and
1539 * other fields within the skb.
Mitch Williamsa132af22015-01-24 09:58:35 +00001540 **/
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001541static inline
1542void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1543 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1544 u8 rx_ptype)
1545{
1546 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1547 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1548 I40E_RXD_QW1_STATUS_SHIFT;
Jacob Keller144ed172016-10-05 09:30:42 -07001549 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1550 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001551 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1552
Jacob Keller12490502016-10-05 09:30:44 -07001553 if (unlikely(tsynvalid))
Jacob Keller144ed172016-10-05 09:30:42 -07001554 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001555
1556 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1557
1558 /* modifies the skb - consumes the enet header */
1559 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1560
1561 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1562
1563 skb_record_rx_queue(skb, rx_ring->queue_index);
1564}
1565
1566/**
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001567 * i40e_cleanup_headers - Correct empty headers
1568 * @rx_ring: rx descriptor ring packet is being transacted on
1569 * @skb: pointer to current skb being fixed
1570 *
1571 * Also address the case where we are pulling data in on pages only
1572 * and as such no data is present in the skb header.
1573 *
1574 * In addition if skb is not at least 60 bytes we need to pad it so that
1575 * it is large enough to qualify as a valid Ethernet frame.
1576 *
1577 * Returns true if an error was encountered and skb was freed.
1578 **/
1579static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
1580{
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001581 /* if eth_skb_pad returns an error the skb was freed */
1582 if (eth_skb_pad(skb))
1583 return true;
1584
1585 return false;
1586}
1587
1588/**
1589 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1590 * @rx_ring: rx descriptor ring to store buffers on
1591 * @old_buff: donor buffer to have page reused
1592 *
1593 * Synchronizes page for reuse by the adapter
1594 **/
1595static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1596 struct i40e_rx_buffer *old_buff)
1597{
1598 struct i40e_rx_buffer *new_buff;
1599 u16 nta = rx_ring->next_to_alloc;
1600
1601 new_buff = &rx_ring->rx_bi[nta];
1602
1603 /* update, and store next to alloc */
1604 nta++;
1605 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1606
1607 /* transfer page from old buffer to new buffer */
Alexander Duyck17936682017-02-21 15:55:39 -08001608 new_buff->dma = old_buff->dma;
1609 new_buff->page = old_buff->page;
1610 new_buff->page_offset = old_buff->page_offset;
1611 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001612}
1613
1614/**
Scott Peterson9b37c932017-02-09 23:43:30 -08001615 * i40e_page_is_reusable - check if any reuse is possible
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001616 * @page: page struct to check
Scott Peterson9b37c932017-02-09 23:43:30 -08001617 *
1618 * A page is not reusable if it was allocated under low memory
1619 * conditions, or it's not in the same NUMA node as this CPU.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001620 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001621static inline bool i40e_page_is_reusable(struct page *page)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001622{
Scott Peterson9b37c932017-02-09 23:43:30 -08001623 return (page_to_nid(page) == numa_mem_id()) &&
1624 !page_is_pfmemalloc(page);
1625}
1626
1627/**
1628 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1629 * the adapter for another receive
1630 *
1631 * @rx_buffer: buffer containing the page
1632 * @page: page address from rx_buffer
1633 * @truesize: actual size of the buffer in this page
1634 *
1635 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1636 * an unused region in the page.
1637 *
1638 * For small pages, @truesize will be a constant value, half the size
1639 * of the memory at page. We'll attempt to alternate between high and
1640 * low halves of the page, with one half ready for use by the hardware
1641 * and the other half being consumed by the stack. We use the page
1642 * ref count to determine whether the stack has finished consuming the
1643 * portion of this page that was passed up with a previous packet. If
1644 * the page ref count is >1, we'll assume the "other" half page is
1645 * still busy, and this page cannot be reused.
1646 *
1647 * For larger pages, @truesize will be the actual space used by the
1648 * received packet (adjusted upward to an even multiple of the cache
1649 * line size). This will advance through the page by the amount
1650 * actually consumed by the received packets while there is still
1651 * space for a buffer. Each region of larger pages will be used at
1652 * most once, after which the page will not be reused.
1653 *
1654 * In either case, if the page is reusable its refcount is increased.
1655 **/
1656static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1657 struct page *page,
1658 const unsigned int truesize)
1659{
1660#if (PAGE_SIZE >= 8192)
1661 unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
1662#endif
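	/* cache the bias before it is decremented; the decrement accounts
	 * for the page reference just handed off to the stack
	 */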
Alexander Duyck17936682017-02-21 15:55:39 -08001663 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
Scott Peterson9b37c932017-02-09 23:43:30 -08001664
1665 /* Is any reuse possible? */
1666 if (unlikely(!i40e_page_is_reusable(page)))
1667 return false;
1668
1669#if (PAGE_SIZE < 8192)
1670 /* if we are only owner of page we can reuse it */
Alexander Duyck17936682017-02-21 15:55:39 -08001671 if (unlikely(page_count(page) != pagecnt_bias))
Scott Peterson9b37c932017-02-09 23:43:30 -08001672 return false;
1673
1674 /* flip page offset to other buffer */
1675 rx_buffer->page_offset ^= truesize;
1676#else
1677 /* move offset up to the next cache line */
1678 rx_buffer->page_offset += truesize;
1679
1680 if (rx_buffer->page_offset > last_offset)
1681 return false;
1682#endif
1683
Alexander Duyck17936682017-02-21 15:55:39 -08001684 /* If we have drained the page fragment pool we need to update
1685 * the pagecnt_bias and page count so that we fully restock the
1686 * number of references the driver holds.
1687 */
1688 if (unlikely(pagecnt_bias == 1)) {
1689 page_ref_add(page, USHRT_MAX);
1690 rx_buffer->pagecnt_bias = USHRT_MAX;
1691 }
Scott Peterson9b37c932017-02-09 23:43:30 -08001692 return true;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001693}
1694
1695/**
1696 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1697 * @rx_ring: rx descriptor ring to transact packets on
1698 * @rx_buffer: buffer containing page to add
Scott Peterson7987dcd2017-02-09 23:37:28 -08001699 * @size: packet length from rx_desc
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001700 * @skb: sk_buff to place the data into
1701 *
1702 * This function will add the data contained in rx_buffer->page to the skb.
1703 * This is done either through a direct copy if the data in the buffer is
1704 * less than the skb header size, otherwise it will just attach the page as
1705 * a frag to the skb.
1706 *
1707 * The function will then update the page offset if necessary and return
1708 * true if the buffer can be reused by the adapter.
1709 **/
1710static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
1711 struct i40e_rx_buffer *rx_buffer,
Scott Peterson7987dcd2017-02-09 23:37:28 -08001712 unsigned int size,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001713 struct sk_buff *skb)
1714{
1715 struct page *page = rx_buffer->page;
Scott Peterson9b37c932017-02-09 23:43:30 -08001716 unsigned char *va = page_address(page) + rx_buffer->page_offset;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001717#if (PAGE_SIZE < 8192)
1718 unsigned int truesize = I40E_RXBUFFER_2048;
1719#else
1720 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001721#endif
Scott Peterson9b37c932017-02-09 23:43:30 -08001722 unsigned int pull_len;
1723
1724 if (unlikely(skb_is_nonlinear(skb)))
1725 goto add_tail_frag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001726
1727 /* will the data fit in the skb we allocated? if so, just
1728 * copy it as it is pretty small anyway
1729 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001730 if (size <= I40E_RX_HDR_SIZE) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001731 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
1732
Scott Peterson9b37c932017-02-09 23:43:30 -08001733 /* page is reusable, we can reuse buffer as-is */
1734 if (likely(i40e_page_is_reusable(page)))
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001735 return true;
1736
1737 /* this page cannot be reused so discard it */
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001738 return false;
1739 }
1740
Scott Peterson9b37c932017-02-09 23:43:30 -08001741 /* we need the header to contain the greater of either
1742 * ETH_HLEN or 60 bytes if the skb->len is less than
1743 * 60 for skb_pad.
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001744 */
Scott Peterson9b37c932017-02-09 23:43:30 -08001745 pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001746
Scott Peterson9b37c932017-02-09 23:43:30 -08001747 /* align pull length to size of long to optimize
1748 * memcpy performance
1749 */
1750 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
1751
1752 /* update all of the pointers */
1753 va += pull_len;
1754 size -= pull_len;
1755
1756add_tail_frag:
1757 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
1758 (unsigned long)va & ~PAGE_MASK, size, truesize);
1759
1760 return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001761}
1762
1763/**
1764 * i40e_fetch_rx_buffer - Allocate skb and populate it
1765 * @rx_ring: rx descriptor ring to transact packets on
 1766 * @rx_desc: descriptor containing info written by hardware
 * @skb: skb being built from prior descriptors, or NULL to start a new one
1767 *
1768 * This function allocates an skb on the fly, and populates it with the page
1769 * data from the current receive descriptor, taking care to set up the skb
1770 * correctly, as well as handling calling the page recycle function if
1771 * necessary.
1772 */
1773static inline
1774struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
Scott Petersone72e5652017-02-09 23:40:25 -08001775 union i40e_rx_desc *rx_desc,
1776 struct sk_buff *skb)
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001777{
Scott Peterson7987dcd2017-02-09 23:37:28 -08001778 u64 local_status_error_len =
1779 le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1780 unsigned int size =
1781 (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1782 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001783 struct i40e_rx_buffer *rx_buffer;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001784 struct page *page;
1785
1786 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1787 page = rx_buffer->page;
1788 prefetchw(page);
1789
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001790 if (likely(!skb)) {
1791 void *page_addr = page_address(page) + rx_buffer->page_offset;
1792
1793 /* prefetch first cache line of first page */
1794 prefetch(page_addr);
1795#if L1_CACHE_BYTES < 128
1796 prefetch(page_addr + L1_CACHE_BYTES);
1797#endif
1798
1799 /* allocate a skb to store the frags */
1800 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1801 I40E_RX_HDR_SIZE,
1802 GFP_ATOMIC | __GFP_NOWARN);
1803 if (unlikely(!skb)) {
1804 rx_ring->rx_stats.alloc_buff_failed++;
1805 return NULL;
1806 }
1807
1808 /* we will be copying header into skb->data in
1809 * pskb_may_pull so it is in our interest to prefetch
1810 * it now to avoid a possible cache miss
1811 */
1812 prefetchw(skb->data);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001813 }
1814
1815 /* we are reusing so sync this buffer for CPU use */
1816 dma_sync_single_range_for_cpu(rx_ring->dev,
1817 rx_buffer->dma,
1818 rx_buffer->page_offset,
Scott Peterson7987dcd2017-02-09 23:37:28 -08001819 size,
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001820 DMA_FROM_DEVICE);
1821
1822 /* pull page into skb */
Scott Peterson7987dcd2017-02-09 23:37:28 -08001823 if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001824 /* hand second half of page back to the ring */
1825 i40e_reuse_rx_page(rx_ring, rx_buffer);
1826 rx_ring->rx_stats.page_reuse_count++;
1827 } else {
1828 /* we are not reusing the buffer so unmap it */
Alexander Duyck59605bc2017-01-30 12:29:35 -08001829 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1830 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
Alexander Duyck17936682017-02-21 15:55:39 -08001831 __page_frag_cache_drain(rx_buffer->page,
1832 rx_buffer->pagecnt_bias);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001833 }
1834
1835 /* clear contents of buffer_info */
1836 rx_buffer->page = NULL;
1837
1838 return skb;
1839}
1840
1841/**
1842 * i40e_is_non_eop - process handling of non-EOP buffers
1843 * @rx_ring: Rx ring being processed
1844 * @rx_desc: Rx descriptor for current buffer
1845 * @skb: Current socket buffer containing buffer in progress
1846 *
1847 * This function updates next to clean. If the buffer is an EOP buffer
1848 * this function exits returning false, otherwise it will place the
1849 * sk_buff in the next buffer to be chained and return true indicating
1850 * that this is in fact a non-EOP buffer.
1851 **/
1852static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1853 union i40e_rx_desc *rx_desc,
1854 struct sk_buff *skb)
1855{
1856 u32 ntc = rx_ring->next_to_clean + 1;
1857
1858 /* fetch, update, and store next to clean */
1859 ntc = (ntc < rx_ring->count) ? ntc : 0;
1860 rx_ring->next_to_clean = ntc;
1861
1862 prefetch(I40E_RX_DESC(rx_ring, ntc));
1863
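	/* a programming status descriptor reports the result of an earlier
	 * programming request (e.g. a Flow Director filter); clean it up
	 * and tell the caller to move on to the next descriptor
	 */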
1864#define staterrlen rx_desc->wb.qword1.status_error_len
1865 if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
1866 i40e_clean_programming_status(rx_ring, rx_desc);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001867 return true;
1868 }
1869 /* if we are the last buffer then there is nothing else to do */
1870#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1871 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1872 return false;
1873
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001874 rx_ring->rx_stats.non_eop_descs++;
1875
1876 return true;
1877}
1878
1879/**
1880 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1881 * @rx_ring: rx descriptor ring to transact packets on
1882 * @budget: Total limit on number of packets to process
1883 *
1884 * This function provides a "bounce buffer" approach to Rx interrupt
1885 * processing. The advantage to this is that on systems that have
1886 * expensive overhead for IOMMU access this provides a means of avoiding
1887 * it by maintaining the mapping of the page to the system.
1888 *
1889 * Returns amount of work completed
1890 **/
1891static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
Mitch Williamsa132af22015-01-24 09:58:35 +00001892{
1893 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Scott Petersone72e5652017-02-09 23:40:25 -08001894 struct sk_buff *skb = rx_ring->skb;
Mitch Williamsa132af22015-01-24 09:58:35 +00001895 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001896 bool failure = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001897
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001898 while (likely(total_rx_packets < budget)) {
1899 union i40e_rx_desc *rx_desc;
Mitch Williamsa132af22015-01-24 09:58:35 +00001900 u16 vlan_tag;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001901 u8 rx_ptype;
1902 u64 qword;
1903
Mitch Williamsa132af22015-01-24 09:58:35 +00001904 /* return some buffers to hardware, one at a time is too slow */
1905 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001906 failure = failure ||
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001907 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
Mitch Williamsa132af22015-01-24 09:58:35 +00001908 cleaned_count = 0;
1909 }
1910
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001911 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1912
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001913 /* status_error_len will always be zero for unused descriptors
1914 * because it's cleared in cleanup, and overlaps with hdr_addr
1915 * which is always zero because packet split isn't used, if the
1916 * hardware wrote DD then it will be non-zero
1917 */
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001918 if (!i40e_test_staterr(rx_desc,
1919 BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001920 break;
1921
Mitch Williamsa132af22015-01-24 09:58:35 +00001922 /* This memory barrier is needed to keep us from reading
1923 * any other fields out of the rx_desc until we know the
1924 * DD bit is set.
1925 */
Alexander Duyck67317162015-04-08 18:49:43 -07001926 dma_rmb();
Mitch Williamsa132af22015-01-24 09:58:35 +00001927
Scott Petersone72e5652017-02-09 23:40:25 -08001928 skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001929 if (!skb)
1930 break;
Mitch Williamsa132af22015-01-24 09:58:35 +00001931
Mitch Williamsa132af22015-01-24 09:58:35 +00001932 cleaned_count++;
1933
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001934 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
Mitch Williamsa132af22015-01-24 09:58:35 +00001935 continue;
Mitch Williamsa132af22015-01-24 09:58:35 +00001936
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001937 /* ERR_MASK will only have valid bits if EOP set, and
1938 * what we are doing here is actually checking
1939 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1940 * the error field
1941 */
1942 if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001943 dev_kfree_skb_any(skb);
Alexander Duyck741b8b82017-02-21 15:55:41 -08001944 skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001945 continue;
1946 }
1947
Scott Petersone72e5652017-02-09 23:40:25 -08001948 if (i40e_cleanup_headers(rx_ring, skb)) {
1949 skb = NULL;
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001950 continue;
Scott Petersone72e5652017-02-09 23:40:25 -08001951 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001952
1953 /* probably a little skewed due to removing CRC */
1954 total_rx_bytes += skb->len;
Mitch Williamsa132af22015-01-24 09:58:35 +00001955
Alexander Duyck99dad8b2016-09-27 11:28:50 -07001956 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1957 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1958 I40E_RXD_QW1_PTYPE_SHIFT;
1959
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001960 /* populate checksum, VLAN, and protocol */
1961 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
Mitch Williamsa132af22015-01-24 09:58:35 +00001962
Mitch Williamsa132af22015-01-24 09:58:35 +00001963#ifdef I40E_FCOE
Jesse Brandeburg1f15d662016-04-01 03:56:06 -07001964 if (unlikely(
1965 i40e_rx_is_fcoe(rx_ptype) &&
1966 !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
Mitch Williamsa132af22015-01-24 09:58:35 +00001967 dev_kfree_skb_any(skb);
1968 continue;
1969 }
1970#endif
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001971
1972 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1973 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1974
Mitch Williamsa132af22015-01-24 09:58:35 +00001975 i40e_receive_skb(rx_ring, skb, vlan_tag);
Scott Petersone72e5652017-02-09 23:40:25 -08001976 skb = NULL;
Mitch Williamsa132af22015-01-24 09:58:35 +00001977
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001978 /* update budget accounting */
1979 total_rx_packets++;
1980 }
Mitch Williamsa132af22015-01-24 09:58:35 +00001981
Scott Petersone72e5652017-02-09 23:40:25 -08001982 rx_ring->skb = skb;
1983
Mitch Williamsa132af22015-01-24 09:58:35 +00001984 u64_stats_update_begin(&rx_ring->syncp);
1985 rx_ring->stats.packets += total_rx_packets;
1986 rx_ring->stats.bytes += total_rx_bytes;
1987 u64_stats_update_end(&rx_ring->syncp);
1988 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1989 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1990
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07001991 /* guarantee a trip back through this routine if there was a failure */
Jesse Brandeburgc2e245a2016-01-13 16:51:46 -08001992 return failure ? budget : total_rx_packets;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00001993}
1994
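/**
 * i40e_buildreg_itr - build a value for writing to the PFINT_DYN_CTLN register
 * @type: ITR index to program (Rx, Tx, or NONE for no ITR update)
 * @itr: ITR interval value to program
 *
 * The returned value keeps the interrupt enabled so it can be written
 * directly to the dynamic control register for this vector.
 **/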
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001995static u32 i40e_buildreg_itr(const int type, const u16 itr)
1996{
1997 u32 val;
1998
1999 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002000 /* Don't clear PBA because that can cause lost interrupts that
2001 * came in while we were cleaning/polling
2002 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002003 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2004 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
2005
2006 return val;
2007}
2008
2009/* a small macro to shorten up some long lines */
2010#define INTREG I40E_PFINT_DYN_CTLN
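/* fetch the per-ring Rx/Tx ITR settings configured for the given queue index */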
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08002011static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
Jacob Keller65e87c02016-09-12 14:18:44 -07002012{
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08002013 return vsi->rx_rings[idx]->rx_itr_setting;
Jacob Keller65e87c02016-09-12 14:18:44 -07002014}
2015
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08002016static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
Jacob Keller65e87c02016-09-12 14:18:44 -07002017{
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08002018 return vsi->tx_rings[idx]->tx_itr_setting;
Jacob Keller65e87c02016-09-12 14:18:44 -07002019}
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002020
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002021/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002022 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2023 * @vsi: the VSI we care about
2024 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2025 *
2026 **/
2027static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2028 struct i40e_q_vector *q_vector)
2029{
2030 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002031 bool rx = false, tx = false;
2032 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002033 int vector;
Kan Lianga75e8002016-02-19 09:24:04 -05002034 int idx = q_vector->v_idx;
Jacob Keller65e87c02016-09-12 14:18:44 -07002035 int rx_itr_setting, tx_itr_setting;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002036
2037 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002038
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04002039 /* avoid dynamic calculation if in countdown mode OR if
2040 * all dynamic is disabled
2041 */
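	/* the NONE ITR index asks hardware not to update any ITR value, so
	 * this default write only re-enables the interrupt
	 */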
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002042 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2043
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08002044 rx_itr_setting = get_rx_itr(vsi, idx);
2045 tx_itr_setting = get_tx_itr(vsi, idx);
Jacob Keller65e87c02016-09-12 14:18:44 -07002046
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04002047 if (q_vector->itr_countdown > 0 ||
Jacob Keller65e87c02016-09-12 14:18:44 -07002048 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
2049 !ITR_IS_DYNAMIC(tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04002050 goto enable_int;
2051 }
2052
Jacob Keller65e87c02016-09-12 14:18:44 -07002053	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002054 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2055 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002056 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002057
Jacob Keller65e87c02016-09-12 14:18:44 -07002058 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002059 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
2060 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002061 }
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002062
2063 if (rx || tx) {
2064 /* get the higher of the two ITR adjustments and
2065 * use the same value for both ITR registers
2066 * when in adaptive mode (Rx and/or Tx)
2067 */
2068 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
2069
2070 q_vector->tx.itr = q_vector->rx.itr = itr;
2071 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
2072 tx = true;
2073 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
2074 rx = true;
2075 }
2076
2077 /* only need to enable the interrupt once, but need
2078 * to possibly update both ITR values
2079 */
2080 if (rx) {
2081 /* set the INTENA_MSK_MASK so that this first write
2082 * won't actually enable the interrupt, instead just
2083 * updating the ITR (it's bit 31 PF and VF)
2084 */
2085 rxval |= BIT(31);
2086 /* don't check _DOWN because interrupt isn't being enabled */
2087 wr32(hw, INTREG(vector - 1), rxval);
2088 }
2089
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04002090enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04002091 if (!test_bit(__I40E_DOWN, &vsi->state))
2092 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04002093
2094 if (q_vector->itr_countdown)
2095 q_vector->itr_countdown--;
2096 else
2097 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04002098}
2099
2100/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002101 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2102 * @napi: napi struct with our devices info in it
2103 * @budget: amount of work driver is allowed to do this pass, in packets
2104 *
2105 * This function will clean all queues associated with a q_vector.
2106 *
2107 * Returns the amount of work done
2108 **/
2109int i40e_napi_poll(struct napi_struct *napi, int budget)
2110{
2111 struct i40e_q_vector *q_vector =
2112 container_of(napi, struct i40e_q_vector, napi);
2113 struct i40e_vsi *vsi = q_vector->vsi;
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002114 struct i40e_ring *ring;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002115 bool clean_complete = true;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002116 bool arm_wb = false;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002117 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002118 int work_done = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002119
2120 if (test_bit(__I40E_DOWN, &vsi->state)) {
2121 napi_complete(napi);
2122 return 0;
2123 }
2124
Kiran Patil9c6c1252015-11-06 15:26:02 -08002125 /* Clear hung_detected bit */
2126 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002127 /* Since the actual Tx work is minimal, we can give the Tx a larger
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002128 * budget and be more aggressive about cleaning up the Tx descriptors.
2129 */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002130 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08002131 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08002132 clean_complete = false;
2133 continue;
2134 }
2135 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04002136 ring->arm_wb = false;
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002137 }
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002138
Alexander Duyckc67cace2015-09-24 09:04:26 -07002139 /* Handle case where we are called by netpoll with a budget of 0 */
2140 if (budget <= 0)
2141 goto tx_only;
2142
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002143 /* We attempt to distribute budget to each Rx queue fairly, but don't
2144 * allow the budget to go below 1 because that would exit polling early.
2145 */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002146 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
Alexander Duyckcd0b6fa2013-09-28 06:00:53 +00002147
Mitch Williamsa132af22015-01-24 09:58:35 +00002148 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburg1a557afc2016-04-20 19:43:37 -07002149 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002150
2151 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08002152 /* if we clean as many as budgeted, we must not be done */
2153 if (cleaned >= budget_per_ring)
2154 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00002155 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002156
2157 /* If work not completed, return budget and polling will return */
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002158 if (!clean_complete) {
Alan Brady96db7762016-09-14 16:24:38 -07002159 const cpumask_t *aff_mask = &q_vector->affinity_mask;
2160 int cpu_id = smp_processor_id();
2161
2162 /* It is possible that the interrupt affinity has changed but,
2163 * if the cpu is pegged at 100%, polling will never exit while
2164 * traffic continues and the interrupt will be stuck on this
2165 * cpu. We check to make sure affinity is correct before we
2166 * continue to poll, otherwise we must stop polling so the
2167 * interrupt can move to the correct cpu.
2168 */
2169 if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
2170 !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07002171tx_only:
Alan Brady96db7762016-09-14 16:24:38 -07002172 if (arm_wb) {
2173 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2174 i40e_enable_wb_on_itr(vsi, q_vector);
2175 }
2176 return budget;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04002177 }
Jesse Brandeburgd91649f2015-01-07 02:55:01 +00002178 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002179
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04002180 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2181 q_vector->arm_wb_state = false;
2182
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002183 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07002184 napi_complete_done(napi, work_done);
Alan Brady96db7762016-09-14 16:24:38 -07002185
2186 /* If we're prematurely stopping polling to fix the interrupt
2187 * affinity we want to make sure polling starts back up so we
2188 * issue a call to i40e_force_wb which triggers a SW interrupt.
2189 */
2190 if (!clean_complete)
2191 i40e_force_wb(vsi, q_vector);
2192 else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
Jesse Brandeburg40d72a52016-01-13 16:51:45 -08002193 i40e_irq_dynamic_enable_icr0(vsi->back, false);
Alan Brady96db7762016-09-14 16:24:38 -07002194 else
2195 i40e_update_enable_itr(vsi, q_vector);
2196
Alexander Duyck6beb84a2016-11-08 13:05:16 -08002197 return min(work_done, budget - 1);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002198}
2199
2200/**
2201 * i40e_atr - Add a Flow Director ATR filter
2202 * @tx_ring: ring to add programming descriptor to
2203 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002204 * @tx_flags: send tx flags
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002205 **/
2206static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002207 u32 tx_flags)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002208{
2209 struct i40e_filter_program_desc *fdir_desc;
2210 struct i40e_pf *pf = tx_ring->vsi->back;
2211 union {
2212 unsigned char *network;
2213 struct iphdr *ipv4;
2214 struct ipv6hdr *ipv6;
2215 } hdr;
2216 struct tcphdr *th;
2217 unsigned int hlen;
2218 u32 flex_ptype, dtype_cmd;
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002219 int l4_proto;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002220 u16 i;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002221
2222 /* make sure ATR is enabled */
Jesse Brandeburg60ea5f82014-01-17 15:36:34 -08002223 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002224 return;
2225
Harshitha Ramamurthyb77ac972017-02-03 10:57:42 -08002226 if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
Anjali Singhai Jain04294e32015-02-27 09:15:28 +00002227 return;
2228
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002229 /* if sampling is disabled do nothing */
2230 if (!tx_ring->atr_sample_rate)
2231 return;
2232
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002233 /* Currently only IPv4/IPv6 with TCP is supported */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002234 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002235 return;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002236
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002237 /* snag network header to get L4 type and address */
2238 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2239 skb_inner_network_header(skb) : skb_network_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002240
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002241 /* Note: tx_flags gets modified to reflect inner protocols in
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002242 * tx_enable_csum function if encap is enabled.
2243 */
Alexander Duyckffcc55c2016-01-25 19:32:54 -08002244 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2245 /* access ihl as u8 to avoid unaligned access on ia64 */
2246 hlen = (hdr.network[0] & 0x0F) << 2;
2247 l4_proto = hdr.ipv4->protocol;
2248 } else {
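		/* for IPv6, ipv6_find_hdr() walks any extension headers to
		 * locate the TCP header
		 */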
2249 hlen = hdr.network - skb->data;
2250 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2251 hlen -= hdr.network - skb->data;
2252 }
2253
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002254 if (l4_proto != IPPROTO_TCP)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002255 return;
2256
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002257 th = (struct tcphdr *)(hdr.network + hlen);
2258
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002259 /* Due to lack of space, no more new filters can be programmed */
Harshitha Ramamurthyb77ac972017-02-03 10:57:42 -08002260 if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002261 return;
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002262 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
Harshitha Ramamurthyb77ac972017-02-03 10:57:42 -08002263 (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002264 /* HW ATR eviction will take care of removing filters on FIN
2265 * and RST packets.
2266 */
2267 if (th->fin || th->rst)
2268 return;
2269 }
Anjali Singhai Jain55a5e602014-02-12 06:33:25 +00002270
2271 tx_ring->atr_count++;
2272
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002273 /* sample on all syn/fin/rst packets or once every atr sample rate */
2274 if (!th->fin &&
2275 !th->syn &&
2276 !th->rst &&
2277 (tx_ring->atr_count < tx_ring->atr_sample_rate))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002278 return;
2279
2280 tx_ring->atr_count = 0;
2281
2282 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002283 i = tx_ring->next_to_use;
2284 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2285
2286 i++;
2287 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002288
2289 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2290 I40E_TXD_FLTR_QW0_QINDEX_MASK;
Alexander Duyck6b037cd2016-01-24 21:17:36 -08002291 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002292 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2293 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2294 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2295 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2296
2297 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2298
2299 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2300
Anjali Singhai Jaince806782014-03-06 08:59:54 +00002301 dtype_cmd |= (th->fin || th->rst) ?
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002302 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2303 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2304 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2305 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2306
2307 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2308 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2309
2310 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2311 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2312
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002313 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002314 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
Anjali Singhai Jain60ccd452015-04-16 20:06:01 -04002315 dtype_cmd |=
2316 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2317 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2318 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2319 else
2320 dtype_cmd |=
2321 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2322 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2323 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
Anjali Singhai Jain433c47d2014-05-22 06:32:17 +00002324
Anjali Singhai Jain72b74862016-01-08 17:50:21 -08002325 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
Harshitha Ramamurthyb77ac972017-02-03 10:57:42 -08002326 (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
Anjali Singhai Jain52eb95e2015-06-05 12:20:33 -04002327 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2328
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002329 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002330 fdir_desc->rsvd = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002331 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
Jesse Brandeburg99753ea2014-06-04 04:22:49 +00002332 fdir_desc->fd_id = cpu_to_le32(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002333}
2334
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002335/**
2336 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2337 * @skb: send buffer
2338 * @tx_ring: ring to send buffer on
2339 * @flags: the tx flags to be set
2340 *
 2341 * Checks the skb and correspondingly sets up several generic transmit flags
2342 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2343 *
2344 * Returns error code indicate the frame should be dropped upon error and the
2345 * otherwise returns 0 to indicate the flags has been set properly.
2346 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002347#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002348inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002349 struct i40e_ring *tx_ring,
2350 u32 *flags)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002351#else
2352static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2353 struct i40e_ring *tx_ring,
2354 u32 *flags)
Vasu Dev38e00432014-08-01 13:27:03 -07002355#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002356{
2357 __be16 protocol = skb->protocol;
2358 u32 tx_flags = 0;
2359
Greg Rose31eaacc2015-03-31 00:45:03 -07002360 if (protocol == htons(ETH_P_8021Q) &&
2361 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2362 /* When HW VLAN acceleration is turned off by the user the
2363 * stack sets the protocol to 8021q so that the driver
2364 * can take any steps required to support the SW only
2365 * VLAN handling. In our case the driver doesn't need
2366 * to take any further steps so just set the protocol
2367 * to the encapsulated ethertype.
2368 */
2369 skb->protocol = vlan_get_protocol(skb);
2370 goto out;
2371 }
2372
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002373 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01002374 if (skb_vlan_tag_present(skb)) {
2375 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002376 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2377 /* else if it is a SW VLAN, check the next protocol and store the tag */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00002378 } else if (protocol == htons(ETH_P_8021Q)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002379 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002380
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002381 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2382 if (!vhdr)
2383 return -EINVAL;
2384
2385 protocol = vhdr->h_vlan_encapsulated_proto;
2386 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2387 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2388 }
2389
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002390 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2391 goto out;
2392
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002393 /* Insert 802.1p priority into VLAN header */
Vasu Dev38e00432014-08-01 13:27:03 -07002394 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2395 (skb->priority != TC_PRIO_CONTROL)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002396 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2397 tx_flags |= (skb->priority & 0x7) <<
2398 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2399 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2400 struct vlan_ethhdr *vhdr;
Francois Romieudd225bc2014-03-30 03:14:48 +00002401 int rc;
2402
2403 rc = skb_cow_head(skb, 0);
2404 if (rc < 0)
2405 return rc;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002406 vhdr = (struct vlan_ethhdr *)skb->data;
2407 vhdr->h_vlan_TCI = htons(tx_flags >>
2408 I40E_TX_FLAGS_VLAN_SHIFT);
2409 } else {
2410 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2411 }
2412 }
Neerav Parikhd40d00b2015-02-24 06:58:40 +00002413
2414out:
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002415 *flags = tx_flags;
2416 return 0;
2417}
2418
2419/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002420 * i40e_tso - set up the tso context descriptor
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002421 * @first: pointer to first Tx buffer for xmit
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002422 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002423 * @cd_type_cmd_tso_mss: Quad Word 1
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002424 *
 2425 * Returns 0 if no TSO can happen, 1 if TSO is set up, or a negative error code
2426 **/
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002427static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2428 u64 *cd_type_cmd_tso_mss)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002429{
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002430 struct sk_buff *skb = first->skb;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002431 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08002432 union {
2433 struct iphdr *v4;
2434 struct ipv6hdr *v6;
2435 unsigned char *hdr;
2436 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002437 union {
2438 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08002439 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002440 unsigned char *hdr;
2441 } l4;
2442 u32 paylen, l4_offset;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002443 u16 gso_segs, gso_size;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002444 int err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002445
Shannon Nelsone9f65632016-01-04 10:33:04 -08002446 if (skb->ip_summed != CHECKSUM_PARTIAL)
2447 return 0;
2448
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002449 if (!skb_is_gso(skb))
2450 return 0;
2451
Francois Romieudd225bc2014-03-30 03:14:48 +00002452 err = skb_cow_head(skb, 0);
2453 if (err < 0)
2454 return err;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002455
Alexander Duyckc7770192016-01-24 21:16:35 -08002456 ip.hdr = skb_network_header(skb);
2457 l4.hdr = skb_transport_header(skb);
Anjali Singhaidf230752014-12-19 02:58:16 +00002458
Alexander Duyckc7770192016-01-24 21:16:35 -08002459 /* initialize outer IP header fields */
2460 if (ip.v4->version == 4) {
2461 ip.v4->tot_len = 0;
2462 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002463 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08002464 ip.v6->payload_len = 0;
2465 }
2466
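	/* for tunneled TSO, fix up the outer headers first and then point
	 * ip/l4 at the inner headers below
	 */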
Alexander Duyck577389a2016-04-02 00:06:56 -07002467 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002468 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07002469 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07002470 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07002471 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08002472 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002473 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2474 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2475 l4.udp->len = 0;
2476
Alexander Duyck54532052016-01-24 21:17:29 -08002477 /* determine offset of outer transport header */
2478 l4_offset = l4.hdr - skb->data;
2479
2480 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002481 paylen = skb->len - l4_offset;
Jacob Kellerb9c015d2016-12-12 15:44:17 -08002482 csum_replace_by_diff(&l4.udp->check,
2483 (__force __wsum)htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08002484 }
2485
Alexander Duyckc7770192016-01-24 21:16:35 -08002486 /* reset pointers to inner headers */
2487 ip.hdr = skb_inner_network_header(skb);
2488 l4.hdr = skb_inner_transport_header(skb);
2489
2490 /* initialize inner IP header fields */
2491 if (ip.v4->version == 4) {
2492 ip.v4->tot_len = 0;
2493 ip.v4->check = 0;
2494 } else {
2495 ip.v6->payload_len = 0;
2496 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002497 }
2498
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002499 /* determine offset of inner transport header */
2500 l4_offset = l4.hdr - skb->data;
2501
2502 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07002503 paylen = skb->len - l4_offset;
Jacob Kellerb9c015d2016-12-12 15:44:17 -08002504 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08002505
2506 /* compute length of segmentation header */
2507 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002508
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002509 /* pull values out of skb_shinfo */
2510 gso_size = skb_shinfo(skb)->gso_size;
2511 gso_segs = skb_shinfo(skb)->gso_segs;
2512
2513 /* update GSO size and bytecount with header size */
2514 first->gso_segs = gso_segs;
2515 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2516
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002517 /* find the field values */
2518 cd_cmd = I40E_TX_CTX_DESC_TSO;
2519 cd_tso_len = skb->len - *hdr_len;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002520 cd_mss = gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08002521 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2522 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2523 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002524 return 1;
2525}
2526
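/* Illustrative sketch (not driver code) of the bytecount adjustment made in
 * i40e_tso() above: with TSO the hardware replicates the header once per
 * segment, so the bytes that actually hit the wire are skb->len plus one
 * extra copy of the header for every segment after the first.  The helper
 * name and the example numbers in the comment are assumptions.
 */
static unsigned int example_tso_wire_bytes(unsigned int skb_len,
					   unsigned int hdr_len,
					   unsigned int gso_size)
{
	unsigned int payload = skb_len - hdr_len;
	unsigned int gso_segs = (payload + gso_size - 1) / gso_size;

	/* e.g. skb_len = 7266, hdr_len = 66, gso_size = 1448:
	 * payload = 7200 -> 5 segments, 7266 + 4 * 66 = 7530 wire bytes
	 */
	return skb_len + (gso_segs - 1) * hdr_len;
}
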
2527/**
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002528 * i40e_tsyn - set up the tsyn context descriptor
2529 * @tx_ring: ptr to the ring to send
2530 * @skb: ptr to the skb we're sending
2531 * @tx_flags: the collected send information
Shannon Nelson9c883bd2015-10-21 19:47:02 -04002532 * @cd_type_cmd_tso_mss: Quad Word 1
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002533 *
2534 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2535 **/
2536static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2537 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2538{
2539 struct i40e_pf *pf;
2540
2541 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2542 return 0;
2543
2544 /* Tx timestamps cannot be sampled when doing TSO */
2545 if (tx_flags & I40E_TX_FLAGS_TSO)
2546 return 0;
2547
2548 /* only timestamp the outbound packet if the user has requested it and
2549 * we are not already transmitting a packet to be timestamped
2550 */
2551 pf = i40e_netdev_to_pf(tx_ring->netdev);
Jacob Keller22b47772014-12-14 01:55:09 +00002552 if (!(pf->flags & I40E_FLAG_PTP))
2553 return 0;
2554
Jakub Kicinski9ce34f02014-03-15 14:55:42 +00002555 if (pf->ptp_tx &&
2556 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002557 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2558 pf->ptp_tx_skb = skb_get(skb);
2559 } else {
2560 return 0;
2561 }
2562
2563 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2564 I40E_TXD_CTX_QW1_CMD_SHIFT;
2565
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00002566 return 1;
2567}
2568
2569/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002570 * i40e_tx_enable_csum - Enable Tx checksum offloads
2571 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002572 * @tx_flags: pointer to Tx flags currently set
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002573 * @td_cmd: Tx descriptor command bits to set
2574 * @td_offset: Tx descriptor header offsets to set
Jean Sacren554f4542015-10-13 01:06:28 -06002575 * @tx_ring: Tx descriptor ring
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002576 * @cd_tunneling: ptr to context desc bits
2577 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08002578static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2579 u32 *td_cmd, u32 *td_offset,
2580 struct i40e_ring *tx_ring,
2581 u32 *cd_tunneling)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002582{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002583 union {
2584 struct iphdr *v4;
2585 struct ipv6hdr *v6;
2586 unsigned char *hdr;
2587 } ip;
2588 union {
2589 struct tcphdr *tcp;
2590 struct udphdr *udp;
2591 unsigned char *hdr;
2592 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002593 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002594 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002595 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002596 u8 l4_proto = 0;
2597
Alexander Duyck529f1f62016-01-24 21:17:10 -08002598 if (skb->ip_summed != CHECKSUM_PARTIAL)
2599 return 0;
2600
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002601 ip.hdr = skb_network_header(skb);
2602 l4.hdr = skb_transport_header(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002603
Alexander Duyck475b4202016-01-24 21:17:01 -08002604 /* compute outer L2 header size */
2605 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2606
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002607 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07002608 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08002609 /* define outer network header type */
2610 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002611 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2612 I40E_TX_CTX_EXT_IP_IPV4 :
2613 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2614
Alexander Duycka0064722016-01-24 21:16:48 -08002615 l4_proto = ip.v4->protocol;
2616 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002617 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002618
2619 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08002620 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002621 if (l4.hdr != exthdr)
2622 ipv6_skip_exthdr(skb, exthdr - skb->data,
2623 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08002624 }
2625
2626 /* define outer transport */
2627 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002628 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08002629 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Singhai, Anjali6a899022015-12-14 12:21:18 -08002630 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002631 break;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002632 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08002633 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08002634 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
Shannon Nelsonc1d17912015-09-25 19:26:04 +00002635 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07002636 case IPPROTO_IPIP:
2637 case IPPROTO_IPV6:
2638 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2639 l4.hdr = skb_inner_network_header(skb);
2640 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002641 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002642 if (*tx_flags & I40E_TX_FLAGS_TSO)
2643 return -1;
2644
2645 skb_checksum_help(skb);
2646 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00002647 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002648
Alexander Duyck577389a2016-04-02 00:06:56 -07002649 /* compute outer L3 header size */
2650 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2651 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2652
2653 /* switch IP header pointer from outer to inner header */
2654 ip.hdr = skb_inner_network_header(skb);
2655
Alexander Duyck475b4202016-01-24 21:17:01 -08002656 /* compute tunnel header size */
2657 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2658 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2659
Alexander Duyck54532052016-01-24 21:17:29 -08002660 /* indicate if we need to offload outer UDP header */
2661 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04002662 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08002663 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2664 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2665
Alexander Duyck475b4202016-01-24 21:17:01 -08002666 /* record tunnel offload values */
2667 *cd_tunneling |= tunnel;
2668
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002669 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002670 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08002671 l4_proto = 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002672
Alexander Duycka0064722016-01-24 21:16:48 -08002673 /* reset type as we transition from outer to inner headers */
2674 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2675 if (ip.v4->version == 4)
2676 *tx_flags |= I40E_TX_FLAGS_IPV4;
2677 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002678 *tx_flags |= I40E_TX_FLAGS_IPV6;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002679 }
2680
2681 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002682 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002683 l4_proto = ip.v4->protocol;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002684 /* the stack computes the IP header already, the only time we
2685 * need the hardware to recompute it is in the case of TSO.
2686 */
Alexander Duyck475b4202016-01-24 21:17:01 -08002687 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2688 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2689 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04002690 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08002691 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08002692
2693 exthdr = ip.hdr + sizeof(*ip.v6);
2694 l4_proto = ip.v6->nexthdr;
2695 if (l4.hdr != exthdr)
2696 ipv6_skip_exthdr(skb, exthdr - skb->data,
2697 &l4_proto, &frag_off);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002698 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002699
Alexander Duyck475b4202016-01-24 21:17:01 -08002700 /* compute inner L3 header size */
2701 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002702
2703 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08002704 switch (l4_proto) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002705 case IPPROTO_TCP:
2706 /* enable checksum offloads */
Alexander Duyck475b4202016-01-24 21:17:01 -08002707 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2708 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002709 break;
2710 case IPPROTO_SCTP:
2711 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002712 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2713 offset |= (sizeof(struct sctphdr) >> 2) <<
2714 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002715 break;
2716 case IPPROTO_UDP:
2717 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08002718 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2719 offset |= (sizeof(struct udphdr) >> 2) <<
2720 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002721 break;
2722 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08002723 if (*tx_flags & I40E_TX_FLAGS_TSO)
2724 return -1;
2725 skb_checksum_help(skb);
2726 return 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002727 }
Alexander Duyck475b4202016-01-24 21:17:01 -08002728
2729 *td_cmd |= cmd;
2730 *td_offset |= offset;
Alexander Duyck529f1f62016-01-24 21:17:10 -08002731
2732 return 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002733}
2734
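/* Worked sketch (illustrative only) of the td_offset packing performed above
 * for a plain, non-tunneled TCP/IPv4 frame: MACLEN is carried in 2-byte
 * words, IPLEN and L4LEN in 4-byte dwords, which is where the /2, /4 and the
 * direct use of tcp->doff come from.  The function name and the fixed header
 * sizes are assumptions; real frames may carry IP or TCP options.
 */
static u32 example_td_offset_tcpv4(void)
{
	u32 offset;

	/* 14-byte Ethernet header -> 7 words */
	offset = (ETH_HLEN / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* 20-byte IPv4 header -> 5 dwords */
	offset |= (sizeof(struct iphdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* 20-byte TCP header -> 5 dwords (the value tcp->doff would hold) */
	offset |= (sizeof(struct tcphdr) / 4) <<
		  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;

	return offset;
}
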
2735/**
2736 * i40e_create_tx_ctx - Build the Tx context descriptor
2737 * @tx_ring: ring to create the descriptor on
2738 * @cd_type_cmd_tso_mss: Quad Word 1
2739 * @cd_tunneling: Quad Word 0 - bits 0-31
2740 * @cd_l2tag2: Quad Word 0 - bits 32-63
2741 **/
2742static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2743 const u64 cd_type_cmd_tso_mss,
2744 const u32 cd_tunneling, const u32 cd_l2tag2)
2745{
2746 struct i40e_tx_context_desc *context_desc;
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002747 int i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002748
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00002749 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2750 !cd_tunneling && !cd_l2tag2)
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002751 return;
2752
2753 /* grab the next descriptor */
Alexander Duyckfc4ac672013-09-28 06:00:22 +00002754 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2755
2756 i++;
2757 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002758
2759 /* cpu_to_le32 and assign to struct fields */
2760 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2761 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00002762 context_desc->rsvd = cpu_to_le16(0);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002763 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2764}
2765
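/* A hedged usage sketch of the helper above: cd_type_cmd_tso_mss starts out
 * as I40E_TX_DESC_DTYPE_CONTEXT and only gains TSO/TSYN bits when those
 * offloads fire, which is why the "nothing to do" check at the top of
 * i40e_create_tx_ctx() can skip the descriptor entirely.  The wrapper name
 * and the TSO numbers below are assumed example values, not taken from a
 * real trace.
 */
static void example_ctx_desc_usage(struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;

	/* pretend TSO was set up: 7200 payload bytes with an MSS of 1448 */
	cd_type_cmd_tso_mss |=
		((u64)I40E_TX_CTX_DESC_TSO << I40E_TXD_CTX_QW1_CMD_SHIFT) |
		(7200ULL << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
		(1448ULL << I40E_TXD_CTX_QW1_MSS_SHIFT);

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling,
			   cd_l2tag2);
}
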
2766/**
Eric Dumazet4567dc12014-10-07 13:30:23 -07002767 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2768 * @tx_ring: the ring to be checked
2769 * @size: the size buffer we want to assure is available
2770 * @size: the number of descriptors we want to ensure are available
2771 * Returns -EBUSY if a stop is needed, else 0
2772 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002773int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Eric Dumazet4567dc12014-10-07 13:30:23 -07002774{
2775 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2776 /* Memory barrier before checking head and tail */
2777 smp_mb();
2778
2779 /* Check again in a case another CPU has just made room available. */
2780 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2781 return -EBUSY;
2782
2783 /* A reprieve! - use start_queue because it doesn't call schedule */
2784 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2785 ++tx_ring->tx_stats.restart_queue;
2786 return 0;
2787}
2788
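/* The fast-path, first-level check lives in the ring header rather than in
 * this file; the sketch below shows its likely shape (an assumption based on
 * how __i40e_maybe_stop_tx() is called, not a copy of the header).  The
 * queue is only stopped, and the barrier above only paid for, once the
 * free-descriptor count actually drops below size.
 */
static inline int example_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;

	/* slow path: stop the queue and re-check after the memory barrier */
	return __i40e_maybe_stop_tx(tx_ring, size);
}
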
2789/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002790 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00002791 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00002792 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002793 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2794 * and so we need to figure out the cases where we need to linearize the skb.
2795 *
2796 * For TSO we need to count the TSO header and segment payload separately.
2797 * As such we need to check cases where we have 7 fragments or more as we
2798 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2799 * the segment payload in the first descriptor, and another 7 for the
2800 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00002801 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08002802bool __i40e_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00002803{
Alexander Duyck2d374902016-02-17 11:02:50 -08002804 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002805 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00002806
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002807 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08002808 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002809 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08002810 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002811
Alexander Duyck2d374902016-02-17 11:02:50 -08002812 /* We need to walk through the list and validate that each group
Alexander Duyck841493a2016-09-06 18:05:04 -07002813 * of 6 fragments totals at least gso_size.
Alexander Duyck2d374902016-02-17 11:02:50 -08002814 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002815 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08002816 frag = &skb_shinfo(skb)->frags[0];
2817
2818	/* Initialize sum to the negative value of gso_size minus 1. We
2819	 * use this as the worst case scenario in which the frag ahead
2820	 * of us only provides one byte, which is why we are limited to 6
2821 * descriptors for a single transmit as the header and previous
2822 * fragment are already consuming 2 descriptors.
2823 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002824 sum = 1 - skb_shinfo(skb)->gso_size;
Alexander Duyck2d374902016-02-17 11:02:50 -08002825
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002826 /* Add size of frags 0 through 4 to create our initial sum */
2827 sum += skb_frag_size(frag++);
2828 sum += skb_frag_size(frag++);
2829 sum += skb_frag_size(frag++);
2830 sum += skb_frag_size(frag++);
2831 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002832
2833 /* Walk through fragments adding latest fragment, testing it, and
2834 * then removing stale fragments from the sum.
2835 */
2836 stale = &skb_shinfo(skb)->frags[0];
2837 for (;;) {
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002838 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08002839
2840 /* if sum is negative we failed to make sufficient progress */
2841 if (sum < 0)
2842 return true;
2843
Alexander Duyck841493a2016-09-06 18:05:04 -07002844 if (!nr_frags--)
Alexander Duyck2d374902016-02-17 11:02:50 -08002845 break;
2846
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07002847 sum -= skb_frag_size(stale++);
Anjali Singhai71da6192015-02-21 06:42:35 +00002848 }
2849
Alexander Duyck2d374902016-02-17 11:02:50 -08002850 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00002851}
2852
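/* Stand-alone sketch (illustrative, not driver code) of the sliding-window
 * test __i40e_chk_linearize() performs: every window of
 * I40E_MAX_BUFFER_TXD - 2 consecutive fragments must carry roughly gso_size
 * bytes, otherwise one TSO segment could need more descriptors than the
 * hardware allows and the skb has to be linearized.  The helper name and the
 * plain array argument are assumptions so the example does not depend on skb
 * internals.
 */
static bool example_needs_linearize(const unsigned int *frag_len, int nr_frags,
				    unsigned int gso_size)
{
	int window = I40E_MAX_BUFFER_TXD - 2;
	int i, sum;

	if (nr_frags < I40E_MAX_BUFFER_TXD - 1)
		return false;

	/* same worst-case bias as above: the frag ahead may give one byte */
	sum = 1 - (int)gso_size;

	/* prime the window with the first window - 1 fragments */
	for (i = 0; i < window - 1; i++)
		sum += frag_len[i];

	/* slide the window across the remaining fragments */
	for (i = window - 1; i < nr_frags; i++) {
		sum += frag_len[i];
		if (sum < 0)
			return true;
		sum -= frag_len[i - (window - 1)];
	}

	return false;
}
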
2853/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002854 * i40e_tx_map - Build the Tx descriptor
2855 * @tx_ring: ring to send buffer on
2856 * @skb: send buffer
2857 * @first: first buffer info buffer to use
2858 * @tx_flags: collected send information
2859 * @hdr_len: size of the packet header
2860 * @td_cmd: the command field in the descriptor
2861 * @td_offset: offset for checksum or crc
2862 **/
Vasu Dev38e00432014-08-01 13:27:03 -07002863#ifdef I40E_FCOE
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002864inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002865 struct i40e_tx_buffer *first, u32 tx_flags,
2866 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002867#else
2868static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2869 struct i40e_tx_buffer *first, u32 tx_flags,
2870 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Vasu Dev38e00432014-08-01 13:27:03 -07002871#endif
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002872{
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002873 unsigned int data_len = skb->data_len;
2874 unsigned int size = skb_headlen(skb);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002875 struct skb_frag_struct *frag;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002876 struct i40e_tx_buffer *tx_bi;
2877 struct i40e_tx_desc *tx_desc;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002878 u16 i = tx_ring->next_to_use;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002879 u32 td_tag = 0;
2880 dma_addr_t dma;
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002881 u16 desc_count = 1;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002882
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002883 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2884 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2885 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2886 I40E_TX_FLAGS_VLAN_SHIFT;
2887 }
2888
Alexander Duycka5e9c572013-09-28 06:00:27 +00002889 first->tx_flags = tx_flags;
2890
2891 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2892
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002893 tx_desc = I40E_TX_DESC(tx_ring, i);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002894 tx_bi = first;
2895
2896 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002897 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2898
Alexander Duycka5e9c572013-09-28 06:00:27 +00002899 if (dma_mapping_error(tx_ring->dev, dma))
2900 goto dma_error;
2901
2902 /* record length, and DMA address */
2903 dma_unmap_len_set(tx_bi, len, size);
2904 dma_unmap_addr_set(tx_bi, dma, dma);
2905
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002906 /* align size to end of page */
2907 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002908 tx_desc->buffer_addr = cpu_to_le64(dma);
2909
2910 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002911 tx_desc->cmd_type_offset_bsz =
2912 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002913 max_data, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002914
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002915 tx_desc++;
2916 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002917 desc_count++;
2918
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002919 if (i == tx_ring->count) {
2920 tx_desc = I40E_TX_DESC(tx_ring, 0);
2921 i = 0;
2922 }
Alexander Duycka5e9c572013-09-28 06:00:27 +00002923
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002924 dma += max_data;
2925 size -= max_data;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002926
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002927 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Alexander Duycka5e9c572013-09-28 06:00:27 +00002928 tx_desc->buffer_addr = cpu_to_le64(dma);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002929 }
2930
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002931 if (likely(!data_len))
2932 break;
2933
Alexander Duycka5e9c572013-09-28 06:00:27 +00002934 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2935 size, td_tag);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002936
2937 tx_desc++;
2938 i++;
Anjali Singhai58044742015-09-25 18:26:13 -07002939 desc_count++;
2940
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002941 if (i == tx_ring->count) {
2942 tx_desc = I40E_TX_DESC(tx_ring, 0);
2943 i = 0;
2944 }
2945
Alexander Duycka5e9c572013-09-28 06:00:27 +00002946 size = skb_frag_size(frag);
2947 data_len -= size;
2948
2949 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2950 DMA_TO_DEVICE);
2951
2952 tx_bi = &tx_ring->tx_bi[i];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00002953 }
2954
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002955 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
Alexander Duycka5e9c572013-09-28 06:00:27 +00002956
2957 i++;
2958 if (i == tx_ring->count)
2959 i = 0;
2960
2961 tx_ring->next_to_use = i;
2962
Eric Dumazet4567dc12014-10-07 13:30:23 -07002963 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai58044742015-09-25 18:26:13 -07002964
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002965 /* write last descriptor with EOP bit */
2966 td_cmd |= I40E_TX_DESC_CMD_EOP;
2967
2968 /* We can OR these values together as they both are checked against
2969 * 4 below and at this point desc_count will be used as a boolean value
2970 * after this if/else block.
2971 */
2972 desc_count |= ++tx_ring->packet_stride;
2973
Anjali Singhai58044742015-09-25 18:26:13 -07002974 /* Algorithm to optimize tail and RS bit setting:
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002975 * if queue is stopped
2976 * mark RS bit
2977 * reset packet counter
2978 * else if xmit_more is supported and is true
2979 * advance packet counter to 4
2980 * reset desc_count to 0
Anjali Singhai58044742015-09-25 18:26:13 -07002981 *
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002982 * if desc_count >= 4
2983 * mark RS bit
2984 * reset packet counter
2985 * if desc_count > 0
2986 * update tail
Anjali Singhai58044742015-09-25 18:26:13 -07002987 *
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002988 * Note: If there are less than 4 descriptors
Anjali Singhai58044742015-09-25 18:26:13 -07002989 * pending and interrupts were disabled the service task will
2990 * trigger a force WB.
2991 */
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002992 if (netif_xmit_stopped(txring_txq(tx_ring))) {
2993 goto do_rs;
2994 } else if (skb->xmit_more) {
2995 /* set stride to arm on next packet and reset desc_count */
2996 tx_ring->packet_stride = WB_STRIDE;
2997 desc_count = 0;
2998 } else if (desc_count >= WB_STRIDE) {
2999do_rs:
3000 /* write last descriptor with RS bit set */
3001 td_cmd |= I40E_TX_DESC_CMD_RS;
Anjali Singhai58044742015-09-25 18:26:13 -07003002 tx_ring->packet_stride = 0;
Anjali Singhai58044742015-09-25 18:26:13 -07003003 }
Anjali Singhai58044742015-09-25 18:26:13 -07003004
3005 tx_desc->cmd_type_offset_bsz =
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003006 build_ctob(td_cmd, td_offset, size, td_tag);
3007
3008 /* Force memory writes to complete before letting h/w know there
3009 * are new descriptors to fetch.
3010 *
3011 * We also use this memory barrier to make certain all of the
3012 * status bits have been updated before next_to_watch is written.
3013 */
3014 wmb();
3015
3016 /* set next_to_watch value indicating a packet is present */
3017 first->next_to_watch = tx_desc;
Anjali Singhai58044742015-09-25 18:26:13 -07003018
Alexander Duycka5e9c572013-09-28 06:00:27 +00003019 /* notify HW of packet */
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003020 if (desc_count) {
Anjali Singhai58044742015-09-25 18:26:13 -07003021 writel(i, tx_ring->tail);
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003022
3023 /* we need this if more than one processor can write to our tail
3024	 * at a time; it synchronizes IO on IA64/Altix systems
3025 */
3026 mmiowb();
Anjali Singhai58044742015-09-25 18:26:13 -07003027 }
Alexander Duyck1dc8b532016-10-11 15:26:54 -07003028
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003029 return;
3030
3031dma_error:
Alexander Duycka5e9c572013-09-28 06:00:27 +00003032 dev_info(tx_ring->dev, "TX DMA map failed\n");
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003033
3034 /* clear dma mappings for failed tx_bi map */
3035 for (;;) {
3036 tx_bi = &tx_ring->tx_bi[i];
Alexander Duycka5e9c572013-09-28 06:00:27 +00003037 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003038 if (tx_bi == first)
3039 break;
3040 if (i == 0)
3041 i = tx_ring->count;
3042 i--;
3043 }
3044
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003045 tx_ring->next_to_use = i;
3046}
3047
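/* Worked sketch (illustrative only) of the "align size to end of page" step
 * in i40e_tx_map() above.  Oversized buffers are split so that every chunk
 * after the first ends on an I40E_MAX_READ_REQ_SIZE boundary, which keeps
 * the device's DMA read requests aligned.  The helper name and the concrete
 * address in the comment are assumptions (the comment also assumes the
 * driver's 4096-byte read request size).
 */
static unsigned int example_first_chunk_len(dma_addr_t dma)
{
	unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

	/* round the first chunk up so that dma + max_data lands on a
	 * boundary: e.g. dma = 0x1000234 -> -dma & 0xFFF = 0xDCC, so the
	 * first chunk carries I40E_MAX_DATA_PER_TXD_ALIGNED + 0xDCC bytes
	 * and the next chunk starts 4K-aligned
	 */
	max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);

	return max_data;
}
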
3048/**
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003049 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3050 * @skb: send buffer
3051 * @tx_ring: ring to send buffer on
3052 *
3053 * Returns NETDEV_TX_OK if sent, else an error code
3054 **/
3055static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3056 struct i40e_ring *tx_ring)
3057{
3058 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3059 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3060 struct i40e_tx_buffer *first;
3061 u32 td_offset = 0;
3062 u32 tx_flags = 0;
3063 __be16 protocol;
3064 u32 td_cmd = 0;
3065 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003066 int tso, count;
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003067 int tsyn;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04003068
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04003069 /* prefetch the data, we'll need it later */
3070 prefetch(skb->data);
3071
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003072 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08003073 if (i40e_chk_linearize(skb, count)) {
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003074 if (__skb_linearize(skb)) {
3075 dev_kfree_skb_any(skb);
3076 return NETDEV_TX_OK;
3077 }
Alexander Duyck5c4654d2016-02-19 12:17:08 -08003078 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08003079 tx_ring->tx_stats.tx_linearize++;
3080 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003081
3082 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3083 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3084 * + 4 desc gap to avoid the cache line where head is,
3085 * + 1 desc for context descriptor,
3086 * otherwise try next time
3087 */
3088 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3089 tx_ring->tx_stats.tx_busy++;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003090 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08003091 }
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003092
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003093 /* record the location of the first descriptor for this packet */
3094 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3095 first->skb = skb;
3096 first->bytecount = skb->len;
3097 first->gso_segs = 1;
3098
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003099 /* prepare the xmit flags */
3100 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3101 goto out_drop;
3102
3103 /* obtain protocol of skb */
Vlad Yasevich3d34dd02014-08-25 10:34:52 -04003104 protocol = vlan_get_protocol(skb);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003105
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003106 /* setup IPv4/IPv6 offloads */
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00003107 if (protocol == htons(ETH_P_IP))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003108 tx_flags |= I40E_TX_FLAGS_IPV4;
Jesse Brandeburg0e2fe46c2013-11-28 06:39:29 +00003109 else if (protocol == htons(ETH_P_IPV6))
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003110 tx_flags |= I40E_TX_FLAGS_IPV6;
3111
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003112 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003113
3114 if (tso < 0)
3115 goto out_drop;
3116 else if (tso)
3117 tx_flags |= I40E_TX_FLAGS_TSO;
3118
Alexander Duyck3bc67972016-02-17 11:02:56 -08003119 /* Always offload the checksum, since it's in the data descriptor */
3120 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3121 tx_ring, &cd_tunneling);
3122 if (tso < 0)
3123 goto out_drop;
3124
Jacob Kellerbeb0dff2014-01-11 05:43:19 +00003125 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3126
3127 if (tsyn)
3128 tx_flags |= I40E_TX_FLAGS_TSYN;
3129
Jakub Kicinski259afec2014-03-15 14:55:37 +00003130 skb_tx_timestamp(skb);
3131
Alexander Duyckb1941302013-09-28 06:00:32 +00003132 /* always enable CRC insertion offload */
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003133 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3134
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003135 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3136 cd_tunneling, cd_l2tag2);
3137
3138 /* Add Flow Director ATR if it's enabled.
3139 *
3140 * NOTE: this must always be directly before the data descriptor.
3141 */
Alexander Duyck6b037cd2016-01-24 21:17:36 -08003142 i40e_atr(tx_ring, skb, tx_flags);
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003143
3144 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3145 td_cmd, td_offset);
3146
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003147 return NETDEV_TX_OK;
3148
3149out_drop:
Alexander Duyck52ea3e82016-11-28 16:05:59 -08003150 dev_kfree_skb_any(first->skb);
3151 first->skb = NULL;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003152 return NETDEV_TX_OK;
3153}
3154
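/* Rough sketch (assumptions only) of the descriptor budget checked near the
 * top of i40e_xmit_frame_ring().  The real per-fragment count comes from
 * i40e_xmit_descriptor_count() in the ring header; here the simple
 * "length / I40E_MAX_DATA_PER_TXD" estimate from the "need:" comment in that
 * function is used.  The helper name and array argument are illustrative only.
 */
static int example_tx_descriptor_budget(unsigned int head_len,
					const unsigned int *frag_len,
					int nr_frags)
{
	int i, count = DIV_ROUND_UP(head_len, I40E_MAX_DATA_PER_TXD);

	for (i = 0; i < nr_frags; i++)
		count += DIV_ROUND_UP(frag_len[i], I40E_MAX_DATA_PER_TXD);

	/* + 4 descriptor gap to avoid the cache line where head is,
	 * + 1 for the context descriptor
	 */
	return count + 4 + 1;
}
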
3155/**
3156 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3157 * @skb: send buffer
3158 * @netdev: network interface device structure
3159 *
3160 * Returns NETDEV_TX_OK if sent, else an error code
3161 **/
3162netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3163{
3164 struct i40e_netdev_priv *np = netdev_priv(netdev);
3165 struct i40e_vsi *vsi = np->vsi;
Alexander Duyck9f65e152013-09-28 06:00:58 +00003166 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003167
3168	/* hardware can't handle really short frames; hardware padding works
3169 * beyond this point
3170 */
Alexander Duycka94d9e22014-12-03 08:17:39 -08003171 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3172 return NETDEV_TX_OK;
Jesse Brandeburgfd0a05c2013-09-11 08:39:51 +00003173
3174 return i40e_xmit_frame_ring(skb, tx_ring);
3175}