/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>

#include "i40evf.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
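
/* Illustration (not part of the upstream source): a final 60-byte data
 * descriptor with EOP and RS set and no offload offsets or VLAN tag
 * would be packed as
 *
 *	qw1 = build_ctob(I40E_TXD_CMD, 0, 60, 0);
 *
 * i.e. DTYPE_DATA in the low bits, the command bits at CMD_SHIFT, and
 * 60 in the TX_BUF_SZ field; the exact bit positions come from the
 * I40E_TXD_QW1_* shift definitions in the shared headers.
 */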

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
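
/* Note (not part of the upstream source): the first buffer of a frame
 * holds the skb pointer and maps the linear skb header with
 * dma_map_single(), so it is released with dma_unmap_single(); buffers
 * without an skb cover paged fragments and are released with
 * dma_unmap_page().  That is why the two branches above differ even
 * though both undo one descriptor's worth of DMA mapping.
 */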

/**
 * i40evf_clean_tx_ring - Free all Tx buffers on a ring
 * @tx_ring: ring to be cleaned
 **/
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40evf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40evf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40evf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
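
/* Illustration (not part of the upstream source): with a 512-entry
 * ring, head = 510 and tail = 2 means the indices have wrapped, so
 * pending = tail + count - head = 2 + 512 - 510 = 4 descriptors are
 * still owned by hardware and awaiting completion.
 */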

#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < WB_STRIDE descriptors
		 * waiting to be written back; if so, kick the hardware to
		 * force them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable the interrupt.
		 */
		unsigned int j = i40evf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
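
/* Note (not part of the upstream source): the clean loop above biases
 * the ring index by subtracting tx_ring->count up front, so i runs from
 * -count toward 0.  The wrap test then collapses to the cheap (!i)
 * check, and the bias is removed with i += tx_ring->count before
 * next_to_clean is stored.  For example, with count = 512 and
 * next_to_clean = 511, i starts at -1 and resets to -512 (index 0)
 * after one increment.
 */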

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
				  vsi->base_vector - 1), val);
	q_vector->arm_wb_state = true;
}

/**
 * i40evf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
	     val);
}
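
/* Note (not part of the upstream source): i40e_enable_wb_on_itr() and
 * i40evf_force_wb() attack the same problem from two sides.  The former
 * arms WB_ON_ITR so completed descriptors are written back on the next
 * ITR expiry without raising an interrupt; the latter fires a software
 * interrupt (SWINT_TRIG) so the pending writeback happens right away.
 */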

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
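
/* Illustration (not part of the upstream source, and assuming
 * ITR_COUNTDOWN_START is 100): with rc->itr at I40E_ITR_20K the
 * interval is itr << 1 = 50 usecs, so usecs = 50 * 100 = 5000.  If
 * 150000 bytes arrived over that window, bytes_per_int = 150000 / 5000
 * = 30 MB/s, which is > 20 and promotes a LOW_LATENCY ring container
 * to BULK_LATENCY (18000 ints/s).
 */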

/**
 * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
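
/* Illustration (not part of the upstream source): a 512-descriptor ring
 * needs 512 * 16 = 8192 bytes plus the trailing u32 used for head
 * writeback, and ALIGN(8196, 4096) allocates 12288 bytes.  That u32 is
 * what i40e_get_head() reads: rather than exposing a head register to
 * the VF, the hardware DMAs the current ring head into the slot just
 * past the last descriptor.
 */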

/**
 * i40evf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      I40E_RXBUFFER_2048,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40evf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40evf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40evf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 PAGE_SIZE,
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40evf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 I40E_RXBUFFER_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
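
/* Usage note (not part of the upstream source): the return convention
 * is inverted from the usual kernel style; true means an allocation
 * failed.  The Rx clean loop ORs the result into its "failure" flag and
 * then returns the full budget, which keeps NAPI polling so the refill
 * is retried on the next pass.
 */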

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 *
 * skb->protocol must be set before this function is called
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
			       union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			       u8 rx_ptype)
{
	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);
}

/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
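
/* Illustration (not part of the upstream source): with 4K pages and 2K
 * Rx buffers each page is used as two halves.  While the stack consumes
 * one half, the copy above hands the buffer info back to the ring at
 * next_to_alloc, and i40e_can_reuse_rx_page() below XOR-flips
 * page_offset between 0 and 2048 to expose the other half to hardware.
 */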

/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 */
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}

/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 * @page: page address from rx_buffer
 * @truesize: actual size of the buffer in this page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page.  We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack.  We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet.  If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size).  This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer.  Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
				   struct page *page,
				   const unsigned int truesize)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != pagecnt_bias))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= truesize;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > last_offset)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
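
/* Note (not part of the upstream source): pagecnt_bias counts the page
 * references the driver still owns.  Handing a buffer to the stack
 * transfers one reference, hence the post-decrement above; reuse is
 * refused when page_count(page) no longer matches the driver-owned
 * count, meaning the stack has not yet dropped its half.  The
 * USHRT_MAX top-up restocks the driver's references with a single
 * page_ref_add() per ~64K recycles instead of one atomic per frame.
 */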

/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @size: packet length from rx_desc
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     unsigned int size,
			     struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = I40E_RXBUFFER_2048;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	/* will the data fit in the skb we allocated? if so, just
	 * copy it as it is pretty small anyway
	 */
	if (size <= I40E_RX_HDR_SIZE) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is reusable, we can reuse buffer as-is */
		if (likely(i40e_page_is_reusable(page)))
			return true;

		/* this page cannot be reused so discard it */
		return false;
	}

	/* we need the header to contain the greater of either
	 * ETH_HLEN or 60 bytes if the skb->len is less than
	 * 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize
	 * memcpy performance
	 */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
}
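
/* Illustration (not part of the upstream source): a 1514-byte frame
 * takes the frag path: eth_get_headlen() pulls only the protocol
 * headers (54 bytes for Ethernet + IPv4 + TCP, say) into skb->data and
 * the remaining bytes stay in the page as a frag.  A 64-byte frame fits
 * under I40E_RX_HDR_SIZE, is memcpy'd whole, and leaves the page half
 * immediately eligible for reuse.
 */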

/**
 * i40evf_fetch_rx_buffer - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_desc: descriptor containing info written by hardware
 *
 * This function allocates an skb on the fly, and populates it with the page
 * data from the current receive descriptor, taking care to set up the skb
 * correctly, as well as handling calling the page recycle function if
 * necessary.
 */
static inline
struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
				       union i40e_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	u64 local_status_error_len =
		le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	unsigned int size =
		(local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
	struct i40e_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) + rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
				       I40E_RX_HDR_SIZE,
				       GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	return skb;
}

/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < budget)) {
		union i40e_rx_desc *rx_desc;
		u16 vlan_tag;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		if (!i40e_test_staterr(rx_desc,
				       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
		if (!skb)
			break;

		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		if (i40e_cleanup_headers(rx_ring, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;

		/* populate checksum, VLAN, and protocol */
		i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;

		i40e_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : total_rx_packets;
}

static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	      /* Don't clear PBA because that can cause lost interrupts that
	       * came in while we were cleaning/polling
	       */
	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
	      (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);

	return val;
}
1357
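/* Illustrative sketch, not part of the driver: with the layout built by
 * i40e_buildreg_itr() above, requesting an Rx interval of 0x10 yields a
 * value with INTENA set, the Rx ITR index in the ITR_INDX field, and
 * 0x10 in the INTERVAL field. Assuming this hardware's usual 2 usec
 * interval granularity, that asks for at least 32 usec between
 * interrupts.
 */
static inline u32 i40e_example_rx_itr_val(void)
{
	return i40e_buildreg_itr(I40E_RX_ITR, 0x10);
}
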
1358/* a small macro to shorten up some long lines */
1359#define INTREG I40E_VFINT_DYN_CTLN1
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08001360static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
Jacob Keller65e87c02016-09-12 14:18:44 -07001361{
1362 struct i40evf_adapter *adapter = vsi->back;
1363
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08001364 return adapter->rx_rings[idx].rx_itr_setting;
Jacob Keller65e87c02016-09-12 14:18:44 -07001365}
1366
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08001367static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
Jacob Keller65e87c02016-09-12 14:18:44 -07001368{
1369 struct i40evf_adapter *adapter = vsi->back;
1370
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08001371 return adapter->tx_rings[idx].tx_itr_setting;
Jacob Keller65e87c02016-09-12 14:18:44 -07001372}
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001373
Greg Rose7f12ad72013-12-21 06:12:51 +00001374/**
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001375 * i40e_update_enable_itr - Update ITR and re-enable MSI-X interrupt
1376 * @vsi: the VSI we care about
1377 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1378 *
1379 **/
1380static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1381 struct i40e_q_vector *q_vector)
1382{
1383 struct i40e_hw *hw = &vsi->back->hw;
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001384 bool rx = false, tx = false;
1385 u32 rxval, txval;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001386 int vector;
Jacob Keller65e87c02016-09-12 14:18:44 -07001387 int idx = q_vector->v_idx;
1388 int rx_itr_setting, tx_itr_setting;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001389
1390 vector = (q_vector->v_idx + vsi->base_vector);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001391
1392 /* avoid dynamic calculation if in countdown mode OR if
1393 * all dynamic is disabled
1394 */
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001395 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1396
Carolyn Wyborny3c234c42016-12-12 15:44:12 -08001397 rx_itr_setting = get_rx_itr(vsi, idx);
1398 tx_itr_setting = get_tx_itr(vsi, idx);
Jacob Keller65e87c02016-09-12 14:18:44 -07001399
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001400 if (q_vector->itr_countdown > 0 ||
Jacob Keller65e87c02016-09-12 14:18:44 -07001401 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
1402 !ITR_IS_DYNAMIC(tx_itr_setting))) {
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001403 goto enable_int;
1404 }
1405
Jacob Keller65e87c02016-09-12 14:18:44 -07001406 if (ITR_IS_DYNAMIC(rx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001407 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1408 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001409 }
Jesse Brandeburg4eeb1ff2015-11-18 17:35:42 -08001410
Jacob Keller65e87c02016-09-12 14:18:44 -07001411 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001412 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1413 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001414 }
Jesse Brandeburg4eeb1ff2015-11-18 17:35:42 -08001415
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001416 if (rx || tx) {
1417 /* get the higher of the two ITR adjustments and
1418 * use the same value for both ITR registers
1419 * when in adaptive mode (Rx and/or Tx)
1420 */
1421 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1422
1423 q_vector->tx.itr = q_vector->rx.itr = itr;
1424 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1425 tx = true;
1426 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1427 rx = true;
1428 }
1429
1430 /* only need to enable the interrupt once, but need
1431 * to possibly update both ITR values
1432 */
1433 if (rx) {
1434 /* set the INTENA_MSK_MASK so that this first write
1435 * won't actually enable the interrupt, instead just
1436		 * updating the ITR (it is bit 31 on both PF and VF)
1437 */
1438 rxval |= BIT(31);
1439 /* don't check _DOWN because interrupt isn't being enabled */
1440 wr32(hw, INTREG(vector - 1), rxval);
1441 }
1442
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001443enable_int:
Jesse Brandeburg8f5e39c2015-09-28 14:16:51 -04001444 if (!test_bit(__I40E_DOWN, &vsi->state))
1445 wr32(hw, INTREG(vector - 1), txval);
Jesse Brandeburgee2319c2015-09-28 14:16:54 -04001446
1447 if (q_vector->itr_countdown)
1448 q_vector->itr_countdown--;
1449 else
1450 q_vector->itr_countdown = ITR_COUNTDOWN_START;
Carolyn Wybornyde32e3e2015-06-10 13:42:07 -04001451}
1452
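/* Illustrative sketch, not part of the driver: the BIT(31) write above
 * relies on the INTENA_MSK bit, which lets a write update an ITR field
 * without changing the interrupt enable state. Under that assumption,
 * updating only the Rx interval from a context that must not re-enable
 * the vector could look like:
 */
static inline void i40e_example_itr_update_only(struct i40e_hw *hw,
						int vector, u16 itr)
{
	u32 val = i40e_buildreg_itr(I40E_RX_ITR, itr) | BIT(31);

	wr32(hw, INTREG(vector - 1), val);
}
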
1453/**
Greg Rose7f12ad72013-12-21 06:12:51 +00001454 * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
1455 * @napi: napi struct with our device's info in it
1456 * @budget: amount of work driver is allowed to do this pass, in packets
1457 *
1458 * This function will clean all queues associated with a q_vector.
1459 *
1460 * Returns the amount of work done
1461 **/
1462int i40evf_napi_poll(struct napi_struct *napi, int budget)
1463{
1464 struct i40e_q_vector *q_vector =
1465 container_of(napi, struct i40e_q_vector, napi);
1466 struct i40e_vsi *vsi = q_vector->vsi;
1467 struct i40e_ring *ring;
1468 bool clean_complete = true;
Anjali Singhai Jainc29af372015-01-10 01:07:19 +00001469 bool arm_wb = false;
Greg Rose7f12ad72013-12-21 06:12:51 +00001470 int budget_per_ring;
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001471 int work_done = 0;
Greg Rose7f12ad72013-12-21 06:12:51 +00001472
1473 if (test_bit(__I40E_DOWN, &vsi->state)) {
1474 napi_complete(napi);
1475 return 0;
1476 }
1477
1478 /* Since the actual Tx work is minimal, we can give the Tx a larger
1479 * budget and be more aggressive about cleaning up the Tx descriptors.
1480 */
Anjali Singhai Jainc29af372015-01-10 01:07:19 +00001481 i40e_for_each_ring(ring, q_vector->tx) {
Alexander Duycka619afe2016-03-07 09:30:03 -08001482 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001483 clean_complete = false;
1484 continue;
1485 }
1486 arm_wb |= ring->arm_wb;
Jesse Brandeburg0deda862015-07-23 16:54:34 -04001487 ring->arm_wb = false;
Anjali Singhai Jainc29af372015-01-10 01:07:19 +00001488 }
Greg Rose7f12ad72013-12-21 06:12:51 +00001489
Alexander Duyckc67cace2015-09-24 09:04:26 -07001490 /* Handle case where we are called by netpoll with a budget of 0 */
1491 if (budget <= 0)
1492 goto tx_only;
1493
Greg Rose7f12ad72013-12-21 06:12:51 +00001494 /* We attempt to distribute budget to each Rx queue fairly, but don't
1495 * allow the budget to go below 1 because that would exit polling early.
1496 */
1497 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1498
Mitch Williamsa132af22015-01-24 09:58:35 +00001499 i40e_for_each_ring(ring, q_vector->rx) {
Jesse Brandeburgab9ad982016-04-18 11:33:46 -07001500 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001501
1502 work_done += cleaned;
Alexander Duyckf2edaaa2016-03-07 09:29:57 -08001503 /* if we clean as many as budgeted, we must not be done */
1504 if (cleaned >= budget_per_ring)
1505 clean_complete = false;
Mitch Williamsa132af22015-01-24 09:58:35 +00001506 }
Greg Rose7f12ad72013-12-21 06:12:51 +00001507
1508 /* If work not completed, return budget and polling will return */
Anjali Singhai Jainc29af372015-01-10 01:07:19 +00001509 if (!clean_complete) {
Alan Brady96db7762016-09-14 16:24:38 -07001510 const cpumask_t *aff_mask = &q_vector->affinity_mask;
1511 int cpu_id = smp_processor_id();
1512
1513 /* It is possible that the interrupt affinity has changed but,
1514 * if the cpu is pegged at 100%, polling will never exit while
1515 * traffic continues and the interrupt will be stuck on this
1516 * cpu. We check to make sure affinity is correct before we
1517 * continue to poll, otherwise we must stop polling so the
1518 * interrupt can move to the correct cpu.
1519 */
1520 if (likely(cpumask_test_cpu(cpu_id, aff_mask))) {
Alexander Duyckc67cace2015-09-24 09:04:26 -07001521tx_only:
Alan Brady96db7762016-09-14 16:24:38 -07001522 if (arm_wb) {
1523 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1524 i40e_enable_wb_on_itr(vsi, q_vector);
1525 }
1526 return budget;
Anjali Singhai Jain164c9f52015-10-21 19:47:08 -04001527 }
Anjali Singhai Jainc29af372015-01-10 01:07:19 +00001528 }
Greg Rose7f12ad72013-12-21 06:12:51 +00001529
Anjali Singhai Jain8e0764b2015-06-05 12:20:30 -04001530 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1531 q_vector->arm_wb_state = false;
1532
Greg Rose7f12ad72013-12-21 06:12:51 +00001533 /* Work is done so exit the polling mode and re-enable the interrupt */
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07001534 napi_complete_done(napi, work_done);
Alan Brady96db7762016-09-14 16:24:38 -07001535
1536 /* If we're prematurely stopping polling to fix the interrupt
1537 * affinity we want to make sure polling starts back up so we
1538 * issue a call to i40evf_force_wb which triggers a SW interrupt.
1539 */
1540 if (!clean_complete)
1541 i40evf_force_wb(vsi, q_vector);
1542 else
1543 i40e_update_enable_itr(vsi, q_vector);
1544
Alexander Duyck6beb84a2016-11-08 13:05:16 -08001545 return min(work_done, budget - 1);
Greg Rose7f12ad72013-12-21 06:12:51 +00001546}
1547
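/* Illustrative sketch, not part of the driver: the Rx budget split used
 * by i40evf_napi_poll() above. With the default NAPI budget of 64 and,
 * say, three ring pairs on a vector, each Rx ring is polled with a
 * budget of 21; a vector with more rings than budget still hands each
 * ring a budget of 1 so every ring makes progress.
 */
static inline int i40e_example_budget_per_ring(int budget, int num_ringpairs)
{
	return max(budget / num_ringpairs, 1);
}
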
1548/**
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04001549 * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
Greg Rose7f12ad72013-12-21 06:12:51 +00001550 * @skb: send buffer
1551 * @tx_ring: ring to send buffer on
1552 * @flags: the tx flags to be set
1553 *
1554 * Checks the skb and sets up the corresponding generic transmit flags
1555 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1556 *
1557 * Returns an error code to indicate the frame should be dropped on error;
1558 * otherwise returns 0 to indicate the flags have been set properly.
1559 **/
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04001560static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
1561 struct i40e_ring *tx_ring,
1562 u32 *flags)
Greg Rose7f12ad72013-12-21 06:12:51 +00001563{
1564 __be16 protocol = skb->protocol;
1565 u32 tx_flags = 0;
1566
Greg Rose31eaacc2015-03-31 00:45:03 -07001567 if (protocol == htons(ETH_P_8021Q) &&
1568 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1569 /* When HW VLAN acceleration is turned off by the user the
1570 * stack sets the protocol to 8021q so that the driver
1571 * can take any steps required to support the SW only
1572 * VLAN handling. In our case the driver doesn't need
1573 * to take any further steps so just set the protocol
1574 * to the encapsulated ethertype.
1575 */
1576 skb->protocol = vlan_get_protocol(skb);
1577 goto out;
1578 }
1579
Greg Rose7f12ad72013-12-21 06:12:51 +00001580 /* if we have a HW VLAN tag being added, default to the HW one */
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001581 if (skb_vlan_tag_present(skb)) {
1582 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
Greg Rose7f12ad72013-12-21 06:12:51 +00001583 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1584 /* else if it is a SW VLAN, check the next protocol and store the tag */
1585 } else if (protocol == htons(ETH_P_8021Q)) {
1586 struct vlan_hdr *vhdr, _vhdr;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04001587
Greg Rose7f12ad72013-12-21 06:12:51 +00001588 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1589 if (!vhdr)
1590 return -EINVAL;
1591
1592 protocol = vhdr->h_vlan_encapsulated_proto;
1593 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
1594 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
1595 }
1596
Greg Rose31eaacc2015-03-31 00:45:03 -07001597out:
Greg Rose7f12ad72013-12-21 06:12:51 +00001598 *flags = tx_flags;
1599 return 0;
1600}
1601
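/* Illustrative sketch, not part of the driver: tx_flags carries the
 * 16-bit VLAN TCI above I40E_TX_FLAGS_VLAN_SHIFT alongside the flag
 * bits, so a hardware-accelerated tag is packed as below and later
 * recovered in i40evf_tx_map() by masking with I40E_TX_FLAGS_VLAN_MASK
 * and shifting back down for the descriptor.
 */
static inline u32 i40e_example_vlan_tx_flags(u16 tci)
{
	return ((u32)tci << I40E_TX_FLAGS_VLAN_SHIFT) | I40E_TX_FLAGS_HW_VLAN;
}
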
1602/**
1603 * i40e_tso - set up the tso context descriptor
Alexander Duyck52ea3e82016-11-28 16:05:59 -08001604 * @first: pointer to first Tx buffer for xmit
Greg Rose7f12ad72013-12-21 06:12:51 +00001605 * @hdr_len: ptr to the size of the packet header
Shannon Nelson9c883bd2015-10-21 19:47:02 -04001606 * @cd_type_cmd_tso_mss: Quad Word 1
Greg Rose7f12ad72013-12-21 06:12:51 +00001607 *
1608 * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error code
1609 **/
Alexander Duyck52ea3e82016-11-28 16:05:59 -08001610static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
1611 u64 *cd_type_cmd_tso_mss)
Greg Rose7f12ad72013-12-21 06:12:51 +00001612{
Alexander Duyck52ea3e82016-11-28 16:05:59 -08001613 struct sk_buff *skb = first->skb;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08001614 u64 cd_cmd, cd_tso_len, cd_mss;
Alexander Duyckc7770192016-01-24 21:16:35 -08001615 union {
1616 struct iphdr *v4;
1617 struct ipv6hdr *v6;
1618 unsigned char *hdr;
1619 } ip;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08001620 union {
1621 struct tcphdr *tcp;
Alexander Duyck54532052016-01-24 21:17:29 -08001622 struct udphdr *udp;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08001623 unsigned char *hdr;
1624 } l4;
1625 u32 paylen, l4_offset;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08001626 u16 gso_segs, gso_size;
Greg Rose7f12ad72013-12-21 06:12:51 +00001627 int err;
Greg Rose7f12ad72013-12-21 06:12:51 +00001628
Shannon Nelsone9f65632016-01-04 10:33:04 -08001629 if (skb->ip_summed != CHECKSUM_PARTIAL)
1630 return 0;
1631
Greg Rose7f12ad72013-12-21 06:12:51 +00001632 if (!skb_is_gso(skb))
1633 return 0;
1634
Francois Romieufe6d4aa2014-03-30 03:14:53 +00001635 err = skb_cow_head(skb, 0);
1636 if (err < 0)
1637 return err;
Greg Rose7f12ad72013-12-21 06:12:51 +00001638
Alexander Duyckc7770192016-01-24 21:16:35 -08001639 ip.hdr = skb_network_header(skb);
1640 l4.hdr = skb_transport_header(skb);
Anjali Singhai85e76d02015-02-21 06:44:16 +00001641
Alexander Duyckc7770192016-01-24 21:16:35 -08001642 /* initialize outer IP header fields */
1643 if (ip.v4->version == 4) {
1644 ip.v4->tot_len = 0;
1645 ip.v4->check = 0;
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08001646 } else {
Alexander Duyckc7770192016-01-24 21:16:35 -08001647 ip.v6->payload_len = 0;
1648 }
1649
Alexander Duyck577389a2016-04-02 00:06:56 -07001650 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04001651 SKB_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -07001652 SKB_GSO_IPXIP4 |
Alexander Duyckbf2d1df2016-05-18 10:44:53 -07001653 SKB_GSO_IPXIP6 |
Alexander Duyck577389a2016-04-02 00:06:56 -07001654 SKB_GSO_UDP_TUNNEL |
Alexander Duyck54532052016-01-24 21:17:29 -08001655 SKB_GSO_UDP_TUNNEL_CSUM)) {
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04001656 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1657 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1658 l4.udp->len = 0;
1659
Alexander Duyck54532052016-01-24 21:17:29 -08001660 /* determine offset of outer transport header */
1661 l4_offset = l4.hdr - skb->data;
1662
1663 /* remove payload length from outer checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07001664 paylen = skb->len - l4_offset;
Jacob Kellerb9c015d2016-12-12 15:44:17 -08001665 csum_replace_by_diff(&l4.udp->check,
1666 (__force __wsum)htonl(paylen));
Alexander Duyck54532052016-01-24 21:17:29 -08001667 }
1668
Alexander Duyckc7770192016-01-24 21:16:35 -08001669 /* reset pointers to inner headers */
1670 ip.hdr = skb_inner_network_header(skb);
1671 l4.hdr = skb_inner_transport_header(skb);
1672
1673 /* initialize inner IP header fields */
1674 if (ip.v4->version == 4) {
1675 ip.v4->tot_len = 0;
1676 ip.v4->check = 0;
1677 } else {
1678 ip.v6->payload_len = 0;
1679 }
Greg Rose7f12ad72013-12-21 06:12:51 +00001680 }
1681
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08001682 /* determine offset of inner transport header */
1683 l4_offset = l4.hdr - skb->data;
1684
1685 /* remove payload length from inner checksum */
Alexander Duyck24d41e52016-03-18 16:06:47 -07001686 paylen = skb->len - l4_offset;
Jacob Kellerb9c015d2016-12-12 15:44:17 -08001687 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
Alexander Duyckc49a7bc2016-01-24 21:16:28 -08001688
1689 /* compute length of segmentation header */
1690 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
Greg Rose7f12ad72013-12-21 06:12:51 +00001691
Alexander Duyck52ea3e82016-11-28 16:05:59 -08001692 /* pull values out of skb_shinfo */
1693 gso_size = skb_shinfo(skb)->gso_size;
1694 gso_segs = skb_shinfo(skb)->gso_segs;
1695
1696 /* update GSO size and bytecount with header size */
1697 first->gso_segs = gso_segs;
1698 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1699
Greg Rose7f12ad72013-12-21 06:12:51 +00001700 /* find the field values */
1701 cd_cmd = I40E_TX_CTX_DESC_TSO;
1702 cd_tso_len = skb->len - *hdr_len;
Alexander Duyck52ea3e82016-11-28 16:05:59 -08001703 cd_mss = gso_size;
Alexander Duyck03f9d6a2016-01-24 21:16:20 -08001704 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
1705 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1706 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
Greg Rose7f12ad72013-12-21 06:12:51 +00001707 return 1;
1708}
1709
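/* Illustrative sketch, not part of the driver: a worked example of the
 * bytecount adjustment in i40e_tso() above. For a hypothetical 7240 byte
 * skb with a 66 byte header and gso_size 1448, the 7174 byte payload
 * splits into 5 segments, each carrying its own copy of the header, so
 * first->bytecount becomes 7240 + (5 - 1) * 66 = 7504, the byte count
 * that actually reaches the wire.
 */
static inline unsigned int i40e_example_tso_wire_bytes(unsigned int skb_len,
						       u8 hdr_len,
						       u16 gso_segs)
{
	return skb_len + (gso_segs - 1) * hdr_len;
}
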
1710/**
1711 * i40e_tx_enable_csum - Enable Tx checksum offloads
1712 * @skb: send buffer
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001713 * @tx_flags: pointer to Tx flags currently set
Greg Rose7f12ad72013-12-21 06:12:51 +00001714 * @td_cmd: Tx descriptor command bits to set
1715 * @td_offset: Tx descriptor header offsets to set
Alexander Duyck529f1f62016-01-24 21:17:10 -08001716 * @tx_ring: Tx descriptor ring
Greg Rose7f12ad72013-12-21 06:12:51 +00001717 * @cd_tunneling: ptr to context desc bits
1718 **/
Alexander Duyck529f1f62016-01-24 21:17:10 -08001719static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1720 u32 *td_cmd, u32 *td_offset,
1721 struct i40e_ring *tx_ring,
1722 u32 *cd_tunneling)
Greg Rose7f12ad72013-12-21 06:12:51 +00001723{
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001724 union {
1725 struct iphdr *v4;
1726 struct ipv6hdr *v6;
1727 unsigned char *hdr;
1728 } ip;
1729 union {
1730 struct tcphdr *tcp;
1731 struct udphdr *udp;
1732 unsigned char *hdr;
1733 } l4;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08001734 unsigned char *exthdr;
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07001735 u32 offset, cmd = 0;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08001736 __be16 frag_off;
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001737 u8 l4_proto = 0;
1738
Alexander Duyck529f1f62016-01-24 21:17:10 -08001739 if (skb->ip_summed != CHECKSUM_PARTIAL)
1740 return 0;
1741
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001742 ip.hdr = skb_network_header(skb);
1743 l4.hdr = skb_transport_header(skb);
Greg Rose7f12ad72013-12-21 06:12:51 +00001744
Alexander Duyck475b4202016-01-24 21:17:01 -08001745 /* compute outer L2 header size */
1746 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1747
Greg Rose7f12ad72013-12-21 06:12:51 +00001748 if (skb->encapsulation) {
Jesse Brandeburgd1bd7432016-04-01 03:56:04 -07001749 u32 tunnel = 0;
Alexander Duycka0064722016-01-24 21:16:48 -08001750 /* define outer network header type */
1751 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyck475b4202016-01-24 21:17:01 -08001752 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
1753 I40E_TX_CTX_EXT_IP_IPV4 :
1754 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1755
Alexander Duycka0064722016-01-24 21:16:48 -08001756 l4_proto = ip.v4->protocol;
1757 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08001758 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08001759
1760 exthdr = ip.hdr + sizeof(*ip.v6);
Alexander Duycka0064722016-01-24 21:16:48 -08001761 l4_proto = ip.v6->nexthdr;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08001762 if (l4.hdr != exthdr)
1763 ipv6_skip_exthdr(skb, exthdr - skb->data,
1764 &l4_proto, &frag_off);
Alexander Duycka0064722016-01-24 21:16:48 -08001765 }
1766
1767 /* define outer transport */
1768 switch (l4_proto) {
Anjali Singhai Jain45991202015-02-27 09:15:29 +00001769 case IPPROTO_UDP:
Alexander Duyck475b4202016-01-24 21:17:01 -08001770 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001771 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00001772 break;
Alexander Duycka0064722016-01-24 21:16:48 -08001773 case IPPROTO_GRE:
Alexander Duyck475b4202016-01-24 21:17:01 -08001774 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
Alexander Duycka0064722016-01-24 21:16:48 -08001775 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
1776 break;
Alexander Duyck577389a2016-04-02 00:06:56 -07001777 case IPPROTO_IPIP:
1778 case IPPROTO_IPV6:
1779 *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
1780 l4.hdr = skb_inner_network_header(skb);
1781 break;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00001782 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08001783 if (*tx_flags & I40E_TX_FLAGS_TSO)
1784 return -1;
1785
1786 skb_checksum_help(skb);
1787 return 0;
Anjali Singhai Jain45991202015-02-27 09:15:29 +00001788 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001789
Alexander Duyck577389a2016-04-02 00:06:56 -07001790 /* compute outer L3 header size */
1791 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1792 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
1793
1794 /* switch IP header pointer from outer to inner header */
1795 ip.hdr = skb_inner_network_header(skb);
1796
Alexander Duyck475b4202016-01-24 21:17:01 -08001797 /* compute tunnel header size */
1798 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1799 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1800
Alexander Duyck54532052016-01-24 21:17:29 -08001801 /* indicate if we need to offload outer UDP header */
1802 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
Alexander Duyck1c7b4a22016-04-14 17:19:25 -04001803 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
Alexander Duyck54532052016-01-24 21:17:29 -08001804 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1805 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
1806
Alexander Duyck475b4202016-01-24 21:17:01 -08001807 /* record tunnel offload values */
1808 *cd_tunneling |= tunnel;
1809
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001810 /* switch L4 header pointer from outer to inner */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001811 l4.hdr = skb_inner_transport_header(skb);
Alexander Duycka0064722016-01-24 21:16:48 -08001812 l4_proto = 0;
Greg Rose7f12ad72013-12-21 06:12:51 +00001813
Alexander Duycka0064722016-01-24 21:16:48 -08001814 /* reset type as we transition from outer to inner headers */
1815 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
1816 if (ip.v4->version == 4)
1817 *tx_flags |= I40E_TX_FLAGS_IPV4;
1818 if (ip.v6->version == 6)
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001819 *tx_flags |= I40E_TX_FLAGS_IPV6;
Greg Rose7f12ad72013-12-21 06:12:51 +00001820 }
1821
1822 /* Enable IP checksum offloads */
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001823 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001824 l4_proto = ip.v4->protocol;
Greg Rose7f12ad72013-12-21 06:12:51 +00001825 /* the stack computes the IP header already, the only time we
1826 * need the hardware to recompute it is in the case of TSO.
1827 */
Alexander Duyck475b4202016-01-24 21:17:01 -08001828 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
1829 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
1830 I40E_TX_DESC_CMD_IIPT_IPV4;
Anjali Singhai Jain89232c32015-04-16 20:06:00 -04001831 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
Alexander Duyck475b4202016-01-24 21:17:01 -08001832 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
Alexander Duycka3fd9d82016-01-24 21:16:54 -08001833
1834 exthdr = ip.hdr + sizeof(*ip.v6);
1835 l4_proto = ip.v6->nexthdr;
1836 if (l4.hdr != exthdr)
1837 ipv6_skip_exthdr(skb, exthdr - skb->data,
1838 &l4_proto, &frag_off);
Greg Rose7f12ad72013-12-21 06:12:51 +00001839 }
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001840
Alexander Duyck475b4202016-01-24 21:17:01 -08001841 /* compute inner L3 header size */
1842 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
Greg Rose7f12ad72013-12-21 06:12:51 +00001843
1844 /* Enable L4 checksum offloads */
Alexander Duyckb96b78f2016-01-24 21:16:42 -08001845 switch (l4_proto) {
Greg Rose7f12ad72013-12-21 06:12:51 +00001846 case IPPROTO_TCP:
1847 /* enable checksum offloads */
Alexander Duyck475b4202016-01-24 21:17:01 -08001848 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1849 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Greg Rose7f12ad72013-12-21 06:12:51 +00001850 break;
1851 case IPPROTO_SCTP:
1852 /* enable SCTP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08001853 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1854 offset |= (sizeof(struct sctphdr) >> 2) <<
1855 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Greg Rose7f12ad72013-12-21 06:12:51 +00001856 break;
1857 case IPPROTO_UDP:
1858 /* enable UDP checksum offload */
Alexander Duyck475b4202016-01-24 21:17:01 -08001859 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1860 offset |= (sizeof(struct udphdr) >> 2) <<
1861 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
Greg Rose7f12ad72013-12-21 06:12:51 +00001862 break;
1863 default:
Alexander Duyck529f1f62016-01-24 21:17:10 -08001864 if (*tx_flags & I40E_TX_FLAGS_TSO)
1865 return -1;
1866 skb_checksum_help(skb);
1867 return 0;
Greg Rose7f12ad72013-12-21 06:12:51 +00001868 }
Alexander Duyck475b4202016-01-24 21:17:01 -08001869
1870 *td_cmd |= cmd;
1871 *td_offset |= offset;
Alexander Duyck529f1f62016-01-24 21:17:10 -08001872
1873 return 1;
Greg Rose7f12ad72013-12-21 06:12:51 +00001874}
1875
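/* Illustrative sketch, not part of the driver: td_offset encodes header
 * sizes in hardware units, MACLEN in 2-byte words, IPLEN and L4LEN in
 * 4-byte words. A plain TCP/IPv4 frame (14 byte Ethernet header, 20 byte
 * IP header, 20 byte TCP header) would therefore encode as 7, 5 and 5:
 */
static inline u32 i40e_example_td_offset(void)
{
	return (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
	       (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
	       (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
}
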
1876/**
1877 * i40e_create_tx_ctx - Build the Tx context descriptor
1878 * @tx_ring: ring to create the descriptor on
1879 * @cd_type_cmd_tso_mss: Quad Word 1
1880 * @cd_tunneling: Quad Word 0 - bits 0-31
1881 * @cd_l2tag2: Quad Word 0 - bits 32-63
1882 **/
1883static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1884 const u64 cd_type_cmd_tso_mss,
1885 const u32 cd_tunneling, const u32 cd_l2tag2)
1886{
1887 struct i40e_tx_context_desc *context_desc;
1888 int i = tx_ring->next_to_use;
1889
Jesse Brandeburgff40dd52014-02-14 02:14:41 +00001890 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1891 !cd_tunneling && !cd_l2tag2)
Greg Rose7f12ad72013-12-21 06:12:51 +00001892 return;
1893
1894 /* grab the next descriptor */
1895 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
1896
1897 i++;
1898 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1899
1900 /* cpu_to_le32 and assign to struct fields */
1901 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
1902 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
Jesse Brandeburg3efbbb22014-06-04 20:41:54 +00001903 context_desc->rsvd = cpu_to_le16(0);
Greg Rose7f12ad72013-12-21 06:12:51 +00001904 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1905}
1906
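/* Illustrative note, not part of the driver: for the TSO example above
 * (7240 byte skb, 66 byte header, MSS 1448), i40e_create_tx_ctx() would
 * be handed a QW1 carrying I40E_TX_CTX_DESC_TSO in the command field, a
 * TSO length of 7174 and an MSS of 1448, while QW0 stays zero for a
 * non-tunneled, untagged frame.
 */
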
Jesse Brandeburg4eeb1ff2015-11-18 17:35:42 -08001907/**
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001908 * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
Anjali Singhai71da6192015-02-21 06:42:35 +00001909 * @skb: send buffer
Anjali Singhai71da6192015-02-21 06:42:35 +00001910 *
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001911 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
1912 * and so we need to figure out the cases where we need to linearize the skb.
1913 *
1914 * For TSO we need to count the TSO header and segment payload separately.
1915 * As such we need to check cases where we have 7 fragments or more as we
1916 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1917 * the segment payload in the first descriptor, and another 7 for the
1918 * fragments.
Anjali Singhai71da6192015-02-21 06:42:35 +00001919 **/
Alexander Duyck2d374902016-02-17 11:02:50 -08001920bool __i40evf_chk_linearize(struct sk_buff *skb)
Anjali Singhai71da6192015-02-21 06:42:35 +00001921{
Alexander Duyck2d374902016-02-17 11:02:50 -08001922 const struct skb_frag_struct *frag, *stale;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001923 int nr_frags, sum;
Anjali Singhai71da6192015-02-21 06:42:35 +00001924
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001925 /* no need to check if number of frags is less than 7 */
Alexander Duyck2d374902016-02-17 11:02:50 -08001926 nr_frags = skb_shinfo(skb)->nr_frags;
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001927 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
Alexander Duyck2d374902016-02-17 11:02:50 -08001928 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00001929
Alexander Duyck2d374902016-02-17 11:02:50 -08001930 /* We need to walk through the list and validate that each group
Alexander Duyck841493a2016-09-06 18:05:04 -07001931 * of 6 fragments totals at least gso_size.
Alexander Duyck2d374902016-02-17 11:02:50 -08001932 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001933 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
Alexander Duyck2d374902016-02-17 11:02:50 -08001934 frag = &skb_shinfo(skb)->frags[0];
1935
1936 /* Initialize size to the negative value of gso_size minus 1. We
1937 * use this as the worst case scenerio in which the frag ahead
1938 * of us only provides one byte which is why we are limited to 6
1939 * descriptors for a single transmit as the header and previous
1940 * fragment are already consuming 2 descriptors.
1941 */
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001942 sum = 1 - skb_shinfo(skb)->gso_size;
Alexander Duyck2d374902016-02-17 11:02:50 -08001943
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001944 /* Add size of frags 0 through 4 to create our initial sum */
1945 sum += skb_frag_size(frag++);
1946 sum += skb_frag_size(frag++);
1947 sum += skb_frag_size(frag++);
1948 sum += skb_frag_size(frag++);
1949 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08001950
1951 /* Walk through fragments adding latest fragment, testing it, and
1952 * then removing stale fragments from the sum.
1953 */
1954 stale = &skb_shinfo(skb)->frags[0];
1955 for (;;) {
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001956 sum += skb_frag_size(frag++);
Alexander Duyck2d374902016-02-17 11:02:50 -08001957
1958 /* if sum is negative we failed to make sufficient progress */
1959 if (sum < 0)
1960 return true;
1961
Alexander Duyck841493a2016-09-06 18:05:04 -07001962 if (!nr_frags--)
Alexander Duyck2d374902016-02-17 11:02:50 -08001963 break;
1964
Alexander Duyck3f3f7cb2016-03-30 16:15:37 -07001965 sum -= skb_frag_size(stale++);
Anjali Singhai71da6192015-02-21 06:42:35 +00001966 }
1967
Alexander Duyck2d374902016-02-17 11:02:50 -08001968 return false;
Anjali Singhai71da6192015-02-21 06:42:35 +00001969}
1970
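/* Illustrative sketch, not part of the driver: the invariant enforced by
 * __i40evf_chk_linearize() written naively over a plain array of
 * fragment sizes. Every run of I40E_MAX_BUFFER_TXD - 2 fragments must
 * carry at least gso_size bytes; e.g. with gso_size 1500, 256 byte
 * fragments pass (6 * 256 = 1536) while 128 byte fragments
 * (6 * 128 = 768) would force a linearization.
 */
static inline bool i40e_example_needs_linearize(const unsigned int *frag_size,
						int nr_frags,
						unsigned int gso_size)
{
	int window = I40E_MAX_BUFFER_TXD - 2;
	int i, j;

	for (i = 0; i + window <= nr_frags; i++) {
		unsigned int sum = 0;

		for (j = 0; j < window; j++)
			sum += frag_size[i + j];
		if (sum < gso_size)
			return true;
	}
	return false;
}
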
Greg Rose7f12ad72013-12-21 06:12:51 +00001971/**
Jesse Brandeburg8f6a2b02015-04-16 20:06:09 -04001972 * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
1973 * @tx_ring: the ring to be checked
1974 * @size: the size buffer we want to assure is available
1975 *
1976 * Returns -EBUSY if a stop is needed, else 0
1977 **/
Alexander Duyck4ec441d2016-02-17 11:02:43 -08001978int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
Jesse Brandeburg8f6a2b02015-04-16 20:06:09 -04001979{
1980 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1981 /* Memory barrier before checking head and tail */
1982 smp_mb();
1983
1984 /* Check again in a case another CPU has just made room available. */
1985 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
1986 return -EBUSY;
1987
1988 /* A reprieve! - use start_queue because it doesn't call schedule */
1989 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
1990 ++tx_ring->tx_stats.restart_queue;
1991 return 0;
1992}
1993
1994/**
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04001995 * i40evf_tx_map - Build the Tx descriptor
Greg Rose7f12ad72013-12-21 06:12:51 +00001996 * @tx_ring: ring to send buffer on
1997 * @skb: send buffer
1998 * @first: first buffer info buffer to use
1999 * @tx_flags: collected send information
2000 * @hdr_len: size of the packet header
2001 * @td_cmd: the command field in the descriptor
2002 * @td_offset: offset for checksum or crc
2003 **/
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002004static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2005 struct i40e_tx_buffer *first, u32 tx_flags,
2006 const u8 hdr_len, u32 td_cmd, u32 td_offset)
Greg Rose7f12ad72013-12-21 06:12:51 +00002007{
2008 unsigned int data_len = skb->data_len;
2009 unsigned int size = skb_headlen(skb);
2010 struct skb_frag_struct *frag;
2011 struct i40e_tx_buffer *tx_bi;
2012 struct i40e_tx_desc *tx_desc;
2013 u16 i = tx_ring->next_to_use;
2014 u32 td_tag = 0;
2015 dma_addr_t dma;
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002016 u16 desc_count = 1;
Greg Rose7f12ad72013-12-21 06:12:51 +00002017
2018 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2019 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2020 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2021 I40E_TX_FLAGS_VLAN_SHIFT;
2022 }
2023
Greg Rose7f12ad72013-12-21 06:12:51 +00002024 first->tx_flags = tx_flags;
2025
2026 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2027
2028 tx_desc = I40E_TX_DESC(tx_ring, i);
2029 tx_bi = first;
2030
2031 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002032 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2033
Greg Rose7f12ad72013-12-21 06:12:51 +00002034 if (dma_mapping_error(tx_ring->dev, dma))
2035 goto dma_error;
2036
2037 /* record length, and DMA address */
2038 dma_unmap_len_set(tx_bi, len, size);
2039 dma_unmap_addr_set(tx_bi, dma, dma);
2040
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002041 /* align size to end of page */
2042 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
Greg Rose7f12ad72013-12-21 06:12:51 +00002043 tx_desc->buffer_addr = cpu_to_le64(dma);
2044
2045 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2046 tx_desc->cmd_type_offset_bsz =
2047 build_ctob(td_cmd, td_offset,
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002048 max_data, td_tag);
Greg Rose7f12ad72013-12-21 06:12:51 +00002049
2050 tx_desc++;
2051 i++;
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002052 desc_count++;
2053
Greg Rose7f12ad72013-12-21 06:12:51 +00002054 if (i == tx_ring->count) {
2055 tx_desc = I40E_TX_DESC(tx_ring, 0);
2056 i = 0;
2057 }
2058
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002059 dma += max_data;
2060 size -= max_data;
Greg Rose7f12ad72013-12-21 06:12:51 +00002061
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002062 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
Greg Rose7f12ad72013-12-21 06:12:51 +00002063 tx_desc->buffer_addr = cpu_to_le64(dma);
2064 }
2065
2066 if (likely(!data_len))
2067 break;
2068
2069 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2070 size, td_tag);
2071
2072 tx_desc++;
2073 i++;
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002074 desc_count++;
2075
Greg Rose7f12ad72013-12-21 06:12:51 +00002076 if (i == tx_ring->count) {
2077 tx_desc = I40E_TX_DESC(tx_ring, 0);
2078 i = 0;
2079 }
2080
2081 size = skb_frag_size(frag);
2082 data_len -= size;
2083
2084 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2085 DMA_TO_DEVICE);
2086
2087 tx_bi = &tx_ring->tx_bi[i];
2088 }
2089
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002090 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
Greg Rose7f12ad72013-12-21 06:12:51 +00002091
2092 i++;
2093 if (i == tx_ring->count)
2094 i = 0;
2095
2096 tx_ring->next_to_use = i;
2097
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002098 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002099
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002100 /* write last descriptor with EOP bit */
2101 td_cmd |= I40E_TX_DESC_CMD_EOP;
2102
2103	/* We can OR these values together because both are checked against
2104	 * 4 below, and after this if/else block desc_count is only used as
2105	 * a boolean value.
2106 */
2107 desc_count |= ++tx_ring->packet_stride;
2108
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002109 /* Algorithm to optimize tail and RS bit setting:
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002110 * if queue is stopped
2111 * mark RS bit
2112 * reset packet counter
2113 * else if xmit_more is supported and is true
2114 * advance packet counter to 4
2115 * reset desc_count to 0
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002116 *
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002117 * if desc_count >= 4
2118 * mark RS bit
2119 * reset packet counter
2120 * if desc_count > 0
2121 * update tail
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002122 *
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002123 * Note: If there are less than 4 descriptors
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002124 * pending and interrupts were disabled the service task will
2125 * trigger a force WB.
2126 */
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002127 if (netif_xmit_stopped(txring_txq(tx_ring))) {
2128 goto do_rs;
2129 } else if (skb->xmit_more) {
2130 /* set stride to arm on next packet and reset desc_count */
2131 tx_ring->packet_stride = WB_STRIDE;
2132 desc_count = 0;
2133 } else if (desc_count >= WB_STRIDE) {
2134do_rs:
2135 /* write last descriptor with RS bit set */
2136 td_cmd |= I40E_TX_DESC_CMD_RS;
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002137 tx_ring->packet_stride = 0;
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002138 }
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002139
2140 tx_desc->cmd_type_offset_bsz =
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002141 build_ctob(td_cmd, td_offset, size, td_tag);
2142
2143 /* Force memory writes to complete before letting h/w know there
2144 * are new descriptors to fetch.
2145 *
2146 * We also use this memory barrier to make certain all of the
2147 * status bits have been updated before next_to_watch is written.
2148 */
2149 wmb();
2150
2151 /* set next_to_watch value indicating a packet is present */
2152 first->next_to_watch = tx_desc;
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002153
Greg Rose7f12ad72013-12-21 06:12:51 +00002154 /* notify HW of packet */
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002155 if (desc_count) {
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002156 writel(i, tx_ring->tail);
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002157
2158		/* we need this if more than one processor can write to our tail
2159		 * at a time; it synchronizes IO on IA64/Altix systems
2160 */
2161 mmiowb();
Anjali Singhai Jain6a7fded2015-10-26 19:44:29 -04002162 }
Alexander Duyck1dc8b532016-10-11 15:26:54 -07002163
Greg Rose7f12ad72013-12-21 06:12:51 +00002164 return;
2165
2166dma_error:
2167 dev_info(tx_ring->dev, "TX DMA map failed\n");
2168
2169 /* clear dma mappings for failed tx_bi map */
2170 for (;;) {
2171 tx_bi = &tx_ring->tx_bi[i];
2172 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2173 if (tx_bi == first)
2174 break;
2175 if (i == 0)
2176 i = tx_ring->count;
2177 i--;
2178 }
2179
2180 tx_ring->next_to_use = i;
2181}
2182
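/* Illustrative sketch, not part of the driver: the "align size to end of
 * page" step in i40evf_tx_map() grows the first chunk of an oversized
 * mapping so that later chunks start on I40E_MAX_READ_REQ_SIZE
 * boundaries. Assuming a 4 KB read request size, a DMA address ending in
 * 0xf00 yields a first chunk of I40E_MAX_DATA_PER_TXD_ALIGNED + 0x100
 * bytes:
 */
static inline unsigned int i40e_example_first_chunk_len(dma_addr_t dma)
{
	return I40E_MAX_DATA_PER_TXD_ALIGNED +
	       (-dma & (I40E_MAX_READ_REQ_SIZE - 1));
}
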
2183/**
Greg Rose7f12ad72013-12-21 06:12:51 +00002184 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2185 * @skb: send buffer
2186 * @tx_ring: ring to send buffer on
2187 *
2188 * Returns NETDEV_TX_OK if sent, else an error code
2189 **/
2190static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2191 struct i40e_ring *tx_ring)
2192{
2193 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2194 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2195 struct i40e_tx_buffer *first;
2196 u32 td_offset = 0;
2197 u32 tx_flags = 0;
2198 __be16 protocol;
2199 u32 td_cmd = 0;
2200 u8 hdr_len = 0;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002201 int tso, count;
Jesse Brandeburg6995b362015-08-28 17:55:54 -04002202
Jesse Brandeburgb74118f2015-10-26 19:44:30 -04002203 /* prefetch the data, we'll need it later */
2204 prefetch(skb->data);
2205
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002206 count = i40e_xmit_descriptor_count(skb);
Alexander Duyck2d374902016-02-17 11:02:50 -08002207 if (i40e_chk_linearize(skb, count)) {
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002208 if (__skb_linearize(skb)) {
2209 dev_kfree_skb_any(skb);
2210 return NETDEV_TX_OK;
2211 }
Alexander Duyck5c4654d2016-02-19 12:17:08 -08002212 count = i40e_txd_use_count(skb->len);
Alexander Duyck2d374902016-02-17 11:02:50 -08002213 tx_ring->tx_stats.tx_linearize++;
2214 }
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002215
2216 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2217 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2218 * + 4 desc gap to avoid the cache line where head is,
2219 * + 1 desc for context descriptor,
2220 * otherwise try next time
2221 */
2222 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2223 tx_ring->tx_stats.tx_busy++;
Greg Rose7f12ad72013-12-21 06:12:51 +00002224 return NETDEV_TX_BUSY;
Alexander Duyck4ec441d2016-02-17 11:02:43 -08002225 }
Greg Rose7f12ad72013-12-21 06:12:51 +00002226
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002227 /* record the location of the first descriptor for this packet */
2228 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2229 first->skb = skb;
2230 first->bytecount = skb->len;
2231 first->gso_segs = 1;
2232
Greg Rose7f12ad72013-12-21 06:12:51 +00002233 /* prepare the xmit flags */
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002234 if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
Greg Rose7f12ad72013-12-21 06:12:51 +00002235 goto out_drop;
2236
2237 /* obtain protocol of skb */
Vlad Yasevicha12c4152014-08-25 10:34:53 -04002238 protocol = vlan_get_protocol(skb);
Greg Rose7f12ad72013-12-21 06:12:51 +00002239
Greg Rose7f12ad72013-12-21 06:12:51 +00002240 /* setup IPv4/IPv6 offloads */
2241 if (protocol == htons(ETH_P_IP))
2242 tx_flags |= I40E_TX_FLAGS_IPV4;
2243 else if (protocol == htons(ETH_P_IPV6))
2244 tx_flags |= I40E_TX_FLAGS_IPV6;
2245
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002246 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
Greg Rose7f12ad72013-12-21 06:12:51 +00002247
2248 if (tso < 0)
2249 goto out_drop;
2250 else if (tso)
2251 tx_flags |= I40E_TX_FLAGS_TSO;
2252
Greg Rose7f12ad72013-12-21 06:12:51 +00002253 /* Always offload the checksum, since it's in the data descriptor */
Alexander Duyck529f1f62016-01-24 21:17:10 -08002254 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2255 tx_ring, &cd_tunneling);
2256 if (tso < 0)
2257 goto out_drop;
Greg Rose7f12ad72013-12-21 06:12:51 +00002258
Alexander Duyck3bc67972016-02-17 11:02:56 -08002259 skb_tx_timestamp(skb);
2260
2261 /* always enable CRC insertion offload */
2262 td_cmd |= I40E_TX_DESC_CMD_ICRC;
2263
Greg Rose7f12ad72013-12-21 06:12:51 +00002264 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2265 cd_tunneling, cd_l2tag2);
2266
Jesse Brandeburg3e587cf2015-04-16 20:06:10 -04002267 i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2268 td_cmd, td_offset);
Greg Rose7f12ad72013-12-21 06:12:51 +00002269
Greg Rose7f12ad72013-12-21 06:12:51 +00002270 return NETDEV_TX_OK;
2271
2272out_drop:
Alexander Duyck52ea3e82016-11-28 16:05:59 -08002273 dev_kfree_skb_any(first->skb);
2274 first->skb = NULL;
Greg Rose7f12ad72013-12-21 06:12:51 +00002275 return NETDEV_TX_OK;
2276}
2277
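/* Illustrative note, not part of the driver: the stop threshold in
 * i40e_xmit_frame_ring() above is the worst case for one frame, "count"
 * data descriptors, a 4 descriptor gap kept clear of the cache line
 * holding head, and 1 context descriptor. A frame needing 8 data
 * descriptors therefore only ships if at least 13 descriptors are free.
 */
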
2278/**
2279 * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2280 * @skb: send buffer
2281 * @netdev: network interface device structure
2282 *
2283 * Returns NETDEV_TX_OK if sent, else an error code
2284 **/
2285netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2286{
2287 struct i40evf_adapter *adapter = netdev_priv(netdev);
Mitch Williams0dd438d2015-10-26 19:44:40 -04002288 struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
Greg Rose7f12ad72013-12-21 06:12:51 +00002289
2290	/* hardware can't handle really short frames; hardware padding only
2291	 * works beyond this point
2292 */
2293 if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
2294 if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
2295 return NETDEV_TX_OK;
2296 skb->len = I40E_MIN_TX_LEN;
2297 skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
2298 }
2299
2300 return i40e_xmit_frame_ring(skb, tx_ring);
2301}