1/* QLogic qede NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/bpf_trace.h>
36#include <net/udp_tunnel.h>
37#include <linux/ip.h>
38#include <net/ipv6.h>
39#include <net/tcp.h>
40#include <linux/if_ether.h>
41#include <linux/if_vlan.h>
42#include <net/ip6_checksum.h>
43#include "qede_ptp.h"
44
45#include <linux/qed/qed_if.h>
46#include "qede.h"
47/*********************************
48 * Content also used by slowpath *
49 *********************************/
50
51int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
52{
53 struct sw_rx_data *sw_rx_data;
54 struct eth_rx_bd *rx_bd;
55 dma_addr_t mapping;
56 struct page *data;
57
58	/* In case lazy-allocation is allowed, postpone allocation until the
59 * end of the NAPI run. We'd still need to make sure the Rx ring has
60 * sufficient buffers to guarantee an additional Rx interrupt.
61 */
62 if (allow_lazy && likely(rxq->filled_buffers > 12)) {
63 rxq->filled_buffers--;
64 return 0;
65 }
66
67	data = alloc_pages(GFP_ATOMIC, 0);
68 if (unlikely(!data))
69 return -ENOMEM;
70
71 /* Map the entire page as it would be used
72 * for multiple RX buffer segment size mapping.
73 */
74 mapping = dma_map_page(rxq->dev, data, 0,
75 PAGE_SIZE, rxq->data_direction);
76 if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
77 __free_page(data);
78 return -ENOMEM;
79 }
80
81 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
82 sw_rx_data->page_offset = 0;
83 sw_rx_data->data = data;
84 sw_rx_data->mapping = mapping;
85
86 /* Advance PROD and get BD pointer */
87 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
88 WARN_ON(!rx_bd);
89 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
90	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
91 rxq->rx_headroom);
92
93 rxq->sw_rx_prod++;
94	rxq->filled_buffers++;
95
96 return 0;
97}
98
99/* Unmap the data and free skb */
100int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
101{
102 u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
103 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
104 struct eth_tx_1st_bd *first_bd;
105 struct eth_tx_bd *tx_data_bd;
106 int bds_consumed = 0;
107 int nbds;
108 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
109 int i, split_bd_len = 0;
110
111 if (unlikely(!skb)) {
112 DP_ERR(edev,
113 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
114 idx, txq->sw_tx_cons, txq->sw_tx_prod);
115 return -1;
116 }
117
118 *len = skb->len;
119
120 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
121
122 bds_consumed++;
123
124 nbds = first_bd->data.nbds;
125
126 if (data_split) {
127 struct eth_tx_bd *split = (struct eth_tx_bd *)
128 qed_chain_consume(&txq->tx_pbl);
129 split_bd_len = BD_UNMAP_LEN(split);
130 bds_consumed++;
131 }
132 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
133 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
134
135 /* Unmap the data of the skb frags */
136 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
137 tx_data_bd = (struct eth_tx_bd *)
138 qed_chain_consume(&txq->tx_pbl);
139 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
140 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
141 }
142
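	/* Walk past any remaining BDs of this packet so the chain consumer
	 * catches up with first_bd->data.nbds.
	 */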
143 while (bds_consumed++ < nbds)
144 qed_chain_consume(&txq->tx_pbl);
145
146 /* Free skb */
147 dev_kfree_skb_any(skb);
148 txq->sw_tx_ring.skbs[idx].skb = NULL;
149 txq->sw_tx_ring.skbs[idx].flags = 0;
150
151 return 0;
152}
153
154/* Unmap the data and free skb when mapping failed during start_xmit */
155static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
156 struct eth_tx_1st_bd *first_bd,
157 int nbd, bool data_split)
158{
159 u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
160 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
161 struct eth_tx_bd *tx_data_bd;
162 int i, split_bd_len = 0;
163
164 /* Return prod to its position before this skb was handled */
165 qed_chain_set_prod(&txq->tx_pbl,
166 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
167
168 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
169
170 if (data_split) {
171 struct eth_tx_bd *split = (struct eth_tx_bd *)
172 qed_chain_produce(&txq->tx_pbl);
173 split_bd_len = BD_UNMAP_LEN(split);
174 nbd--;
175 }
176
177 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
178 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
179
180 /* Unmap the data of the skb frags */
181 for (i = 0; i < nbd; i++) {
182 tx_data_bd = (struct eth_tx_bd *)
183 qed_chain_produce(&txq->tx_pbl);
184 if (tx_data_bd->nbytes)
185 dma_unmap_page(txq->dev,
186 BD_UNMAP_ADDR(tx_data_bd),
187 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
188 }
189
190 /* Return again prod to its position before this skb was handled */
191 qed_chain_set_prod(&txq->tx_pbl,
192 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
193
194 /* Free skb */
195 dev_kfree_skb_any(skb);
196 txq->sw_tx_ring.skbs[idx].skb = NULL;
197 txq->sw_tx_ring.skbs[idx].flags = 0;
198}
199
200static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
201{
202 u32 rc = XMIT_L4_CSUM;
203 __be16 l3_proto;
204
205 if (skb->ip_summed != CHECKSUM_PARTIAL)
206 return XMIT_PLAIN;
207
208 l3_proto = vlan_get_protocol(skb);
209 if (l3_proto == htons(ETH_P_IPV6) &&
210 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
211 *ipv6_ext = 1;
212
213 if (skb->encapsulation) {
214 rc |= XMIT_ENC;
215 if (skb_is_gso(skb)) {
216 unsigned short gso_type = skb_shinfo(skb)->gso_type;
217
218 if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
219 (gso_type & SKB_GSO_GRE_CSUM))
220 rc |= XMIT_ENC_GSO_L4_CSUM;
221
222 rc |= XMIT_LSO;
223 return rc;
224 }
225 }
226
227 if (skb_is_gso(skb))
228 rc |= XMIT_LSO;
229
230 return rc;
231}
232
233static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
234 struct eth_tx_2nd_bd *second_bd,
235 struct eth_tx_3rd_bd *third_bd)
236{
237 u8 l4_proto;
238 u16 bd2_bits1 = 0, bd2_bits2 = 0;
239
240 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
241
242 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
243 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
244 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
245
246 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
247 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
248
249 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
250 l4_proto = ipv6_hdr(skb)->nexthdr;
251 else
252 l4_proto = ip_hdr(skb)->protocol;
253
254 if (l4_proto == IPPROTO_UDP)
255 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
256
257 if (third_bd)
258 third_bd->data.bitfields |=
259 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
260 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
261 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
262
263 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
264 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
265}
266
267static int map_frag_to_bd(struct qede_tx_queue *txq,
268 skb_frag_t *frag, struct eth_tx_bd *bd)
269{
270 dma_addr_t mapping;
271
272 /* Map skb non-linear frag data for DMA */
273 mapping = skb_frag_dma_map(txq->dev, frag, 0,
274 skb_frag_size(frag), DMA_TO_DEVICE);
275 if (unlikely(dma_mapping_error(txq->dev, mapping)))
276 return -ENOMEM;
277
278 /* Setup the data pointer of the frag data */
279 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
280
281 return 0;
282}
283
284static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
285{
286 if (is_encap_pkt)
287 return (skb_inner_transport_header(skb) +
288 inner_tcp_hdrlen(skb) - skb->data);
289 else
290 return (skb_transport_header(skb) +
291 tcp_hdrlen(skb) - skb->data);
292}
293
294/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
295#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
296static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
297{
298 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
299
300 if (xmit_type & XMIT_LSO) {
301 int hlen;
302
303 hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
304
305 /* linear payload would require its own BD */
306 if (skb_headlen(skb) > hlen)
307 allowed_frags--;
308 }
309
310 return (skb_shinfo(skb)->nr_frags > allowed_frags);
311}
312#endif
313
314static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
315{
316 /* wmb makes sure that the BDs data is updated before updating the
317 * producer, otherwise FW may read old data from the BDs.
318 */
319 wmb();
320 barrier();
321 writel(txq->tx_db.raw, txq->doorbell_addr);
322
323 /* mmiowb is needed to synchronize doorbell writes from more than one
324 * processor. It guarantees that the write arrives to the device before
325 * the queue lock is released and another start_xmit is called (possibly
326 * on another CPU). Without this barrier, the next doorbell can bypass
327 * this doorbell. This is applicable to IA64/Altix systems.
328 */
329 mmiowb();
330}
331
332static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
333 struct sw_rx_data *metadata, u16 padding, u16 length)
334{
335 struct qede_tx_queue *txq = fp->xdp_tx;
336 u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
337 struct eth_tx_1st_bd *first_bd;
338
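	/* Drop the frame if the XDP Tx ring has no free BDs left */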
339 if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
340 txq->stopped_cnt++;
341 return -ENOMEM;
342 }
343
344 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
345
346 memset(first_bd, 0, sizeof(*first_bd));
347 first_bd->data.bd_flags.bitfields =
348 BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
349 first_bd->data.bitfields |=
350 (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
351 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
352 first_bd->data.nbds = 1;
353
354 /* We can safely ignore the offset, as it's 0 for XDP */
355 BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
356
357 /* Synchronize the buffer back to device, as program [probably]
358 * has changed it.
359 */
360 dma_sync_single_for_device(&edev->pdev->dev,
361 metadata->mapping + padding,
362 length, PCI_DMA_TODEVICE);
363
364	txq->sw_tx_ring.xdp[idx].page = metadata->data;
365	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
366	txq->sw_tx_prod++;
367
368 /* Mark the fastpath for future XDP doorbell */
369 fp->xdp_xmit = 1;
370
371 return 0;
372}
373
374int qede_txq_has_work(struct qede_tx_queue *txq)
375{
376 u16 hw_bd_cons;
377
378 /* Tell compiler that consumer and producer can change */
379 barrier();
380 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
381 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
382 return 0;
383
384 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
385}
386
387static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
388{
389	u16 hw_bd_cons, idx;
390
391 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
392 barrier();
393
394 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
395	qed_chain_consume(&txq->tx_pbl);
396	idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
397
398	dma_unmap_page(&edev->pdev->dev,
399 txq->sw_tx_ring.xdp[idx].mapping,
400 PAGE_SIZE, DMA_BIDIRECTIONAL);
401 __free_page(txq->sw_tx_ring.xdp[idx].page);
402
403 txq->sw_tx_cons++;
404 txq->xmit_pkts++;
405 }
406}
407
408static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
409{
410 struct netdev_queue *netdev_txq;
411 u16 hw_bd_cons;
412 unsigned int pkts_compl = 0, bytes_compl = 0;
413 int rc;
414
415 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
416
417 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
418 barrier();
419
420 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
421 int len = 0;
422
423 rc = qede_free_tx_pkt(edev, txq, &len);
424 if (rc) {
425 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
426 hw_bd_cons,
427 qed_chain_get_cons_idx(&txq->tx_pbl));
428 break;
429 }
430
431 bytes_compl += len;
432 pkts_compl++;
433 txq->sw_tx_cons++;
434 txq->xmit_pkts++;
435 }
436
437 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
438
439 /* Need to make the tx_bd_cons update visible to start_xmit()
440 * before checking for netif_tx_queue_stopped(). Without the
441 * memory barrier, there is a small possibility that
442 * start_xmit() will miss it and cause the queue to be stopped
443 * forever.
444 * On the other hand we need an rmb() here to ensure the proper
445 * ordering of bit testing in the following
446 * netif_tx_queue_stopped(txq) call.
447 */
448 smp_mb();
449
450 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
451 /* Taking tx_lock is needed to prevent reenabling the queue
452	 * while it's empty. This could have happened if rx_action() gets
453 * suspended in qede_tx_int() after the condition before
454 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
455 *
456 * stops the queue->sees fresh tx_bd_cons->releases the queue->
457 * sends some packets consuming the whole queue again->
458 * stops the queue
459 */
460
461 __netif_tx_lock(netdev_txq, smp_processor_id());
462
463 if ((netif_tx_queue_stopped(netdev_txq)) &&
464 (edev->state == QEDE_STATE_OPEN) &&
465 (qed_chain_get_elem_left(&txq->tx_pbl)
466 >= (MAX_SKB_FRAGS + 1))) {
467 netif_tx_wake_queue(netdev_txq);
468 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
469 "Wake queue was called\n");
470 }
471
472 __netif_tx_unlock(netdev_txq);
473 }
474
475 return 0;
476}
477
478bool qede_has_rx_work(struct qede_rx_queue *rxq)
479{
480 u16 hw_comp_cons, sw_comp_cons;
481
482 /* Tell compiler that status block fields can change */
483 barrier();
484
485 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
486 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
487
488 return hw_comp_cons != sw_comp_cons;
489}
490
491static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
492{
493 qed_chain_consume(&rxq->rx_bd_ring);
494 rxq->sw_rx_cons++;
495}
496
497/* This function reuses the buffer(from an offset) from
498 * consumer index to producer index in the bd ring
499 */
500static inline void qede_reuse_page(struct qede_rx_queue *rxq,
501 struct sw_rx_data *curr_cons)
502{
503 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
504 struct sw_rx_data *curr_prod;
505 dma_addr_t new_mapping;
506
507 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
508 *curr_prod = *curr_cons;
509
510 new_mapping = curr_prod->mapping + curr_prod->page_offset;
511
512 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
513	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
514 rxq->rx_headroom);
515
516 rxq->sw_rx_prod++;
517 curr_cons->data = NULL;
518}
519
520/* In case of allocation failures reuse buffers
521 * from consumer index to produce buffers for firmware
522 */
523void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
524{
525 struct sw_rx_data *curr_cons;
526
527 for (; count > 0; count--) {
528 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
529 qede_reuse_page(rxq, curr_cons);
530 qede_rx_bd_ring_consume(rxq);
531 }
532}
533
534static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
535 struct sw_rx_data *curr_cons)
536{
537 /* Move to the next segment in the page */
538 curr_cons->page_offset += rxq->rx_buf_seg_size;
539
540 if (curr_cons->page_offset == PAGE_SIZE) {
541	if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
542	/* Since we failed to allocate new buffer
543 * current buffer can be used again.
544 */
545 curr_cons->page_offset -= rxq->rx_buf_seg_size;
546
547 return -ENOMEM;
548 }
549
550 dma_unmap_page(rxq->dev, curr_cons->mapping,
551 PAGE_SIZE, rxq->data_direction);
552 } else {
553 /* Increment refcount of the page as we don't want
554 * network stack to take the ownership of the page
555 * which can be recycled multiple times by the driver.
556 */
557 page_ref_inc(curr_cons->data);
558 qede_reuse_page(rxq, curr_cons);
559 }
560
561 return 0;
562}
563
564void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
565{
566 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
567 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
568 struct eth_rx_prod_data rx_prods = {0};
569
570 /* Update producers */
571 rx_prods.bd_prod = cpu_to_le16(bd_prod);
572 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
573
574 /* Make sure that the BD and SGE data is updated before updating the
575 * producers since FW might read the BD/SGE right after the producer
576 * is updated.
577 */
578 wmb();
579
580 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
581 (u32 *)&rx_prods);
582
583 /* mmiowb is needed to synchronize doorbell writes from more than one
584 * processor. It guarantees that the write arrives to the device before
585 * the napi lock is released and another qede_poll is called (possibly
586 * on another CPU). Without this barrier, the next doorbell can bypass
587 * this doorbell. This is applicable to IA64/Altix systems.
588 */
589 mmiowb();
590}
591
592static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
593{
594 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
595 enum rss_hash_type htype;
596 u32 hash = 0;
597
598 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
599 if (htype) {
600 hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
601 (htype == RSS_HASH_TYPE_IPV6)) ?
602 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
603 hash = le32_to_cpu(rss_hash);
604 }
605 skb_set_hash(skb, hash, hash_type);
606}
607
608static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
609{
610 skb_checksum_none_assert(skb);
611
612 if (csum_flag & QEDE_CSUM_UNNECESSARY)
613 skb->ip_summed = CHECKSUM_UNNECESSARY;
614
615	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
616	skb->csum_level = 1;
617	skb->encapsulation = 1;
618	}
619}
620
621static inline void qede_skb_receive(struct qede_dev *edev,
622 struct qede_fastpath *fp,
623 struct qede_rx_queue *rxq,
624 struct sk_buff *skb, u16 vlan_tag)
625{
626 if (vlan_tag)
627 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
628
629 napi_gro_receive(&fp->napi, skb);
630}
631
632static void qede_set_gro_params(struct qede_dev *edev,
633 struct sk_buff *skb,
634 struct eth_fast_path_rx_tpa_start_cqe *cqe)
635{
636 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
637
638 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
639 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
640 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
641 else
642 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
643
644 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
645 cqe->header_len;
646}
647
648static int qede_fill_frag_skb(struct qede_dev *edev,
649 struct qede_rx_queue *rxq,
650 u8 tpa_agg_index, u16 len_on_bd)
651{
652 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
653 NUM_RX_BDS_MAX];
654 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
655 struct sk_buff *skb = tpa_info->skb;
656
657 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
658 goto out;
659
660 /* Add one frag and update the appropriate fields in the skb */
661 skb_fill_page_desc(skb, tpa_info->frag_id++,
662 current_bd->data, current_bd->page_offset,
663 len_on_bd);
664
665 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
666 /* Incr page ref count to reuse on allocation failure
667 * so that it doesn't get freed while freeing SKB.
668 */
669 page_ref_inc(current_bd->data);
670 goto out;
671 }
672
673 qed_chain_consume(&rxq->rx_bd_ring);
674 rxq->sw_rx_cons++;
675
676 skb->data_len += len_on_bd;
677 skb->truesize += rxq->rx_buf_seg_size;
678 skb->len += len_on_bd;
679
680 return 0;
681
682out:
683 tpa_info->state = QEDE_AGG_STATE_ERROR;
684 qede_recycle_rx_bd_ring(rxq, 1);
685
686 return -ENOMEM;
687}
688
689static bool qede_tunn_exist(u16 flag)
690{
691 return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
692 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
693}
694
695static u8 qede_check_tunn_csum(u16 flag)
696{
697 u16 csum_flag = 0;
698 u8 tcsum = 0;
699
700 if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
701 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
702 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
703 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
704
705 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
706 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
707 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
708 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
709 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
710 }
711
712 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
713 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
714 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
715 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
716
717 if (csum_flag & flag)
718 return QEDE_CSUM_ERROR;
719
720 return QEDE_CSUM_UNNECESSARY | tcsum;
721}
722
723static void qede_tpa_start(struct qede_dev *edev,
724 struct qede_rx_queue *rxq,
725 struct eth_fast_path_rx_tpa_start_cqe *cqe)
726{
727 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
728 struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
729 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
730 struct sw_rx_data *replace_buf = &tpa_info->buffer;
731 dma_addr_t mapping = tpa_info->buffer_mapping;
732 struct sw_rx_data *sw_rx_data_cons;
733 struct sw_rx_data *sw_rx_data_prod;
734
735 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
736 sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
737
738 /* Use pre-allocated replacement buffer - we can't release the agg.
739	 * start until it's over and we don't want to risk allocation failing
740 * here, so re-allocate when aggregation will be over.
741 */
742 sw_rx_data_prod->mapping = replace_buf->mapping;
743
744 sw_rx_data_prod->data = replace_buf->data;
745 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
746 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
747 sw_rx_data_prod->page_offset = replace_buf->page_offset;
748
749 rxq->sw_rx_prod++;
750
751 /* move partial skb from cons to pool (don't unmap yet)
752	 * save the mapping in case we drop the packet later on.
753 */
754 tpa_info->buffer = *sw_rx_data_cons;
755 mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
756 le32_to_cpu(rx_bd_cons->addr.lo));
757
758 tpa_info->buffer_mapping = mapping;
759 rxq->sw_rx_cons++;
760
761 /* set tpa state to start only if we are able to allocate skb
762 * for this aggregation, otherwise mark as error and aggregation will
763 * be dropped
764 */
765 tpa_info->skb = netdev_alloc_skb(edev->ndev,
766 le16_to_cpu(cqe->len_on_first_bd));
767 if (unlikely(!tpa_info->skb)) {
768 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
769 tpa_info->state = QEDE_AGG_STATE_ERROR;
770 goto cons_buf;
771 }
772
773 /* Start filling in the aggregation info */
774 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
775 tpa_info->frag_id = 0;
776 tpa_info->state = QEDE_AGG_STATE_START;
777
778 /* Store some information from first CQE */
779 tpa_info->start_cqe_placement_offset = cqe->placement_offset;
780 tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
781 if ((le16_to_cpu(cqe->pars_flags.flags) >>
782 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
783 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
784 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
785 else
786 tpa_info->vlan_tag = 0;
787
788 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
789
790 /* This is needed in order to enable forwarding support */
791 qede_set_gro_params(edev, tpa_info->skb, cqe);
792
793cons_buf: /* We still need to handle bd_len_list to consume buffers */
794 if (likely(cqe->ext_bd_len_list[0]))
795 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
796 le16_to_cpu(cqe->ext_bd_len_list[0]));
797
798 if (unlikely(cqe->ext_bd_len_list[1])) {
799 DP_ERR(edev,
800 "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
801 tpa_info->state = QEDE_AGG_STATE_ERROR;
802 }
803}
804
805#ifdef CONFIG_INET
806static void qede_gro_ip_csum(struct sk_buff *skb)
807{
808 const struct iphdr *iph = ip_hdr(skb);
809 struct tcphdr *th;
810
811 skb_set_transport_header(skb, sizeof(struct iphdr));
812 th = tcp_hdr(skb);
813
814 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
815 iph->saddr, iph->daddr, 0);
816
817 tcp_gro_complete(skb);
818}
819
820static void qede_gro_ipv6_csum(struct sk_buff *skb)
821{
822 struct ipv6hdr *iph = ipv6_hdr(skb);
823 struct tcphdr *th;
824
825 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
826 th = tcp_hdr(skb);
827
828 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
829 &iph->saddr, &iph->daddr, 0);
830 tcp_gro_complete(skb);
831}
832#endif
833
834static void qede_gro_receive(struct qede_dev *edev,
835 struct qede_fastpath *fp,
836 struct sk_buff *skb,
837 u16 vlan_tag)
838{
839 /* FW can send a single MTU sized packet from gro flow
840 * due to aggregation timeout/last segment etc. which
841 * is not expected to be a gro packet. If a skb has zero
842 * frags then simply push it in the stack as non gso skb.
843 */
844 if (unlikely(!skb->data_len)) {
845 skb_shinfo(skb)->gso_type = 0;
846 skb_shinfo(skb)->gso_size = 0;
847 goto send_skb;
848 }
849
850#ifdef CONFIG_INET
851 if (skb_shinfo(skb)->gso_size) {
852 skb_reset_network_header(skb);
853
854 switch (skb->protocol) {
855 case htons(ETH_P_IP):
856 qede_gro_ip_csum(skb);
857 break;
858 case htons(ETH_P_IPV6):
859 qede_gro_ipv6_csum(skb);
860 break;
861 default:
862 DP_ERR(edev,
863 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
864 ntohs(skb->protocol));
865 }
866 }
867#endif
868
869send_skb:
870 skb_record_rx_queue(skb, fp->rxq->rxq_id);
871 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
872}
873
874static inline void qede_tpa_cont(struct qede_dev *edev,
875 struct qede_rx_queue *rxq,
876 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
877{
878 int i;
879
880 for (i = 0; cqe->len_list[i]; i++)
881 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
882 le16_to_cpu(cqe->len_list[i]));
883
884 if (unlikely(i > 1))
885 DP_ERR(edev,
886 "Strange - TPA cont with more than a single len_list entry\n");
887}
888
889static int qede_tpa_end(struct qede_dev *edev,
890 struct qede_fastpath *fp,
891 struct eth_fast_path_rx_tpa_end_cqe *cqe)
892{
893 struct qede_rx_queue *rxq = fp->rxq;
894 struct qede_agg_info *tpa_info;
895 struct sk_buff *skb;
896 int i;
897
898 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
899 skb = tpa_info->skb;
900
901 for (i = 0; cqe->len_list[i]; i++)
902 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
903 le16_to_cpu(cqe->len_list[i]));
904 if (unlikely(i > 1))
905 DP_ERR(edev,
906	 "Strange - TPA end with more than a single len_list entry\n");
907
908 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
909 goto err;
910
911 /* Sanity */
912 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
913 DP_ERR(edev,
914 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
915 cqe->num_of_bds, tpa_info->frag_id);
916 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
917 DP_ERR(edev,
918 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
919 le16_to_cpu(cqe->total_packet_len), skb->len);
920
921 memcpy(skb->data,
922 page_address(tpa_info->buffer.data) +
923 tpa_info->start_cqe_placement_offset +
924 tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
925
926 /* Finalize the SKB */
927 skb->protocol = eth_type_trans(skb, edev->ndev);
928 skb->ip_summed = CHECKSUM_UNNECESSARY;
929
930 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
931 * to skb_shinfo(skb)->gso_segs
932 */
933 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
934
935 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
936
937 tpa_info->state = QEDE_AGG_STATE_NONE;
938
939	return 1;
940err:
941 tpa_info->state = QEDE_AGG_STATE_NONE;
942 dev_kfree_skb_any(tpa_info->skb);
943 tpa_info->skb = NULL;
944	return 0;
945}
946
947static u8 qede_check_notunn_csum(u16 flag)
948{
949 u16 csum_flag = 0;
950 u8 csum = 0;
951
952 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
953 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
954 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
955 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
956 csum = QEDE_CSUM_UNNECESSARY;
957 }
958
959 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
960 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
961
962 if (csum_flag & flag)
963 return QEDE_CSUM_ERROR;
964
965 return csum;
966}
967
968static u8 qede_check_csum(u16 flag)
969{
970 if (!qede_tunn_exist(flag))
971 return qede_check_notunn_csum(flag);
972 else
973 return qede_check_tunn_csum(flag);
974}
975
976static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
977 u16 flag)
978{
979 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
980
981 if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
982 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
983 (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
984 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
985 return true;
986
987 return false;
988}
989
990/* Return true iff packet is to be passed to stack */
991static bool qede_rx_xdp(struct qede_dev *edev,
992 struct qede_fastpath *fp,
993 struct qede_rx_queue *rxq,
994 struct bpf_prog *prog,
995 struct sw_rx_data *bd,
996	struct eth_fast_path_rx_reg_cqe *cqe,
997 u16 data_offset)
998{
999 u16 len = le16_to_cpu(cqe->len_on_first_bd);
1000 struct xdp_buff xdp;
1001 enum xdp_action act;
1002
1003	xdp.data = page_address(bd->data) + data_offset;
1004	xdp.data_end = xdp.data + len;
1005
1006 /* Queues always have a full reset currently, so for the time
1007 * being until there's atomic program replace just mark read
1008 * side for map helpers.
1009 */
1010 rcu_read_lock();
1011 act = bpf_prog_run_xdp(prog, &xdp);
1012 rcu_read_unlock();
1013
1014 if (act == XDP_PASS)
1015 return true;
1016
1017 /* Count number of packets not to be passed to stack */
1018 rxq->xdp_no_pass++;
1019
1020 switch (act) {
1021 case XDP_TX:
1022 /* We need the replacement buffer before transmit. */
1023	if (qede_alloc_rx_buffer(rxq, true)) {
1024	qede_recycle_rx_bd_ring(rxq, 1);
1025	trace_xdp_exception(edev->ndev, prog, act);
1026	return false;
1027 }
1028
1029 /* Now if there's a transmission problem, we'd still have to
1030 * throw current buffer, as replacement was already allocated.
1031 */
1032	if (qede_xdp_xmit(edev, fp, bd, data_offset, len)) {
1033	dma_unmap_page(rxq->dev, bd->mapping,
1034 PAGE_SIZE, DMA_BIDIRECTIONAL);
1035 __free_page(bd->data);
1036	trace_xdp_exception(edev->ndev, prog, act);
1037	}
1038
1039 /* Regardless, we've consumed an Rx BD */
1040 qede_rx_bd_ring_consume(rxq);
1041 return false;
1042
1043 default:
1044 bpf_warn_invalid_xdp_action(act);
1045 case XDP_ABORTED:
1046	trace_xdp_exception(edev->ndev, prog, act);
1047	case XDP_DROP:
1048 qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1049 }
1050
1051 return false;
1052}
1053
1054static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
1055 struct qede_rx_queue *rxq,
1056 struct sw_rx_data *bd, u16 len,
1057 u16 pad)
1058{
1059	unsigned int offset = bd->page_offset + pad;
1060	struct skb_frag_struct *frag;
1061 struct page *page = bd->data;
1062 unsigned int pull_len;
1063 struct sk_buff *skb;
1064 unsigned char *va;
1065
1066	/* Allocate a new SKB with a sufficiently large header len */
1067 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1068 if (unlikely(!skb))
1069 return NULL;
1070
1071 /* Copy data into SKB - if it's small, we can simply copy it and
1072	 * re-use the already allocated & mapped memory.
1073 */
1074 if (len + pad <= edev->rx_copybreak) {
1075 memcpy(skb_put(skb, len),
1076	 page_address(page) + offset, len);
1077	qede_reuse_page(rxq, bd);
1078 goto out;
1079 }
1080
1081 frag = &skb_shinfo(skb)->frags[0];
1082
1083 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1084	 page, offset, len, rxq->rx_buf_seg_size);
1085
1086 va = skb_frag_address(frag);
1087 pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
1088
1089 /* Align the pull_len to optimize memcpy */
1090 memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
1091
1092 /* Correct the skb & frag sizes offset after the pull */
1093 skb_frag_size_sub(frag, pull_len);
1094 frag->page_offset += pull_len;
1095 skb->data_len -= pull_len;
1096 skb->tail += pull_len;
1097
1098 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
1099 /* Incr page ref count to reuse on allocation failure so
1100 * that it doesn't get freed while freeing SKB [as its
1101 * already mapped there].
1102 */
1103 page_ref_inc(page);
1104 dev_kfree_skb_any(skb);
1105 return NULL;
1106 }
1107
1108out:
1109 /* We've consumed the first BD and prepared an SKB */
1110 qede_rx_bd_ring_consume(rxq);
1111 return skb;
1112}
1113
1114static int qede_rx_build_jumbo(struct qede_dev *edev,
1115 struct qede_rx_queue *rxq,
1116 struct sk_buff *skb,
1117 struct eth_fast_path_rx_reg_cqe *cqe,
1118 u16 first_bd_len)
1119{
1120 u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1121 struct sw_rx_data *bd;
1122 u16 bd_cons_idx;
1123 u8 num_frags;
1124
1125 pkt_len -= first_bd_len;
1126
1127 /* We've already used one BD for the SKB. Now take care of the rest */
1128 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1129 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1130 pkt_len;
1131
1132 if (unlikely(!cur_size)) {
1133 DP_ERR(edev,
1134 "Still got %d BDs for mapping jumbo, but length became 0\n",
1135 num_frags);
1136 goto out;
1137 }
1138
1139 /* We need a replacement buffer for each BD */
1140	if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1141	goto out;
1142
1143 /* Now that we've allocated the replacement buffer,
1144 * we can safely consume the next BD and map it to the SKB.
1145 */
1146 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1147 bd = &rxq->sw_rx_ring[bd_cons_idx];
1148 qede_rx_bd_ring_consume(rxq);
1149
1150 dma_unmap_page(rxq->dev, bd->mapping,
1151 PAGE_SIZE, DMA_FROM_DEVICE);
1152
1153 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
1154 bd->data, 0, cur_size);
1155
1156 skb->truesize += PAGE_SIZE;
1157 skb->data_len += cur_size;
1158 skb->len += cur_size;
1159 pkt_len -= cur_size;
1160 }
1161
1162 if (unlikely(pkt_len))
1163 DP_ERR(edev,
1164 "Mapped all BDs of jumbo, but still have %d bytes\n",
1165 pkt_len);
1166
1167out:
1168 return num_frags;
1169}
1170
1171static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1172 struct qede_fastpath *fp,
1173 struct qede_rx_queue *rxq,
1174 union eth_rx_cqe *cqe,
1175 enum eth_rx_cqe_type type)
1176{
1177 switch (type) {
1178 case ETH_RX_CQE_TYPE_TPA_START:
1179 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1180 return 0;
1181 case ETH_RX_CQE_TYPE_TPA_CONT:
1182 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1183 return 0;
1184 case ETH_RX_CQE_TYPE_TPA_END:
1185	return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1186	default:
1187 return 0;
1188 }
1189}
1190
1191static int qede_rx_process_cqe(struct qede_dev *edev,
1192 struct qede_fastpath *fp,
1193 struct qede_rx_queue *rxq)
1194{
1195 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1196 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1197 u16 len, pad, bd_cons_idx, parse_flag;
1198 enum eth_rx_cqe_type cqe_type;
1199 union eth_rx_cqe *cqe;
1200 struct sw_rx_data *bd;
1201 struct sk_buff *skb;
1202 __le16 flags;
1203 u8 csum_flag;
1204
1205 /* Get the CQE from the completion ring */
1206 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1207 cqe_type = cqe->fast_path_regular.type;
1208
1209 /* Process an unlikely slowpath event */
1210 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1211 struct eth_slow_path_rx_cqe *sp_cqe;
1212
1213 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1214 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1215 return 0;
1216 }
1217
1218 /* Handle TPA cqes */
1219 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1220 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1221
1222 /* Get the data from the SW ring; Consume it only after it's evident
1223 * we wouldn't recycle it.
1224 */
1225 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1226 bd = &rxq->sw_rx_ring[bd_cons_idx];
1227
1228 fp_cqe = &cqe->fast_path_regular;
1229 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1230	pad = fp_cqe->placement_offset + rxq->rx_headroom;
1231
1232 /* Run eBPF program if one is attached */
1233 if (xdp_prog)
1234	if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe, pad))
1235	return 0;
1236
1237 /* If this is an error packet then drop it */
1238 flags = cqe->fast_path_regular.pars_flags.flags;
1239 parse_flag = le16_to_cpu(flags);
1240
1241 csum_flag = qede_check_csum(parse_flag);
1242 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1243 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
1244 rxq->rx_ip_frags++;
1245 } else {
1246 DP_NOTICE(edev,
1247 "CQE has error, flags = %x, dropping incoming packet\n",
1248 parse_flag);
1249 rxq->rx_hw_errors++;
1250 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1251 return 0;
1252 }
1253 }
1254
1255 /* Basic validation passed; Need to prepare an SKB. This would also
1256 * guarantee to finally consume the first BD upon success.
1257 */
1258 skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
1259 if (!skb) {
1260 rxq->rx_alloc_errors++;
1261 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1262 return 0;
1263 }
1264
1265 /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed
1266 * by a single cqe.
1267 */
1268 if (fp_cqe->bd_num > 1) {
1269 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1270 fp_cqe, len);
1271
1272 if (unlikely(unmapped_frags > 0)) {
1273 qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1274 dev_kfree_skb_any(skb);
1275 return 0;
1276 }
1277 }
1278
1279 /* The SKB contains all the data. Now prepare meta-magic */
1280 skb->protocol = eth_type_trans(skb, edev->ndev);
1281 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1282 qede_set_skb_csum(skb, csum_flag);
1283 skb_record_rx_queue(skb, rxq->rxq_id);
1284	qede_ptp_record_rx_ts(edev, cqe, skb);
1285
1286 /* SKB is prepared - pass it to stack */
1287 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
1288
1289 return 1;
1290}
1291
1292static int qede_rx_int(struct qede_fastpath *fp, int budget)
1293{
1294 struct qede_rx_queue *rxq = fp->rxq;
1295 struct qede_dev *edev = fp->edev;
1296	int work_done = 0, rcv_pkts = 0;
1297	u16 hw_comp_cons, sw_comp_cons;
1298
1299 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1300 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1301
1302 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
1303 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
1304 * read before it is written by FW, then FW writes CQE and SB, and then
1305 * the CPU reads the hw_comp_cons, it will use an old CQE.
1306 */
1307 rmb();
1308
1309 /* Loop to complete all indicated BDs */
1310 while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1311	rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1312	qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1313 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1314 work_done++;
1315 }
1316
1317	rxq->rcv_pkts += rcv_pkts;
1318
1319	/* Allocate replacement buffers */
1320 while (rxq->num_rx_buffers - rxq->filled_buffers)
1321 if (qede_alloc_rx_buffer(rxq, false))
1322 break;
1323
1324	/* Update producers */
1325 qede_update_rx_prod(edev, rxq);
1326
1327 return work_done;
1328}
1329
1330static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1331{
1332 qed_sb_update_sb_idx(fp->sb_info);
1333
1334 /* *_has_*_work() reads the status block, thus we need to ensure that
1335 * status block indices have been actually read (qed_sb_update_sb_idx)
1336 * prior to this check (*_has_*_work) so that we won't write the
1337 * "newer" value of the status block to HW (if there was a DMA right
1338 * after qede_has_rx_work and if there is no rmb, the memory reading
1339 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
1340 * In this case there will never be another interrupt until there is
1341 * another update of the status block, while there is still unhandled
1342 * work.
1343 */
1344 rmb();
1345
1346 if (likely(fp->type & QEDE_FASTPATH_RX))
1347 if (qede_has_rx_work(fp->rxq))
1348 return true;
1349
1350 if (fp->type & QEDE_FASTPATH_XDP)
1351 if (qede_txq_has_work(fp->xdp_tx))
1352 return true;
1353
1354 if (likely(fp->type & QEDE_FASTPATH_TX))
1355 if (qede_txq_has_work(fp->txq))
1356 return true;
1357
1358 return false;
1359}
1360
1361/*********************
1362 * NDO & API related *
1363 *********************/
1364int qede_poll(struct napi_struct *napi, int budget)
1365{
1366 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1367 napi);
1368 struct qede_dev *edev = fp->edev;
1369 int rx_work_done = 0;
1370
1371 if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
1372 qede_tx_int(edev, fp->txq);
1373
1374 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1375 qede_xdp_tx_int(edev, fp->xdp_tx);
1376
1377 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1378 qede_has_rx_work(fp->rxq)) ?
1379 qede_rx_int(fp, budget) : 0;
1380 if (rx_work_done < budget) {
1381 if (!qede_poll_is_more_work(fp)) {
1382	napi_complete_done(napi, rx_work_done);
1383
1384 /* Update and reenable interrupts */
1385 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1386 } else {
1387 rx_work_done = budget;
1388 }
1389 }
1390
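	/* If qede_xdp_xmit() queued any frames during this poll, ring the
	 * XDP Tx doorbell once for all of them.
	 */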
1391 if (fp->xdp_xmit) {
1392 u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1393
1394 fp->xdp_xmit = 0;
1395 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1396 qede_update_tx_producer(fp->xdp_tx);
1397 }
1398
1399 return rx_work_done;
1400}
1401
1402irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1403{
1404 struct qede_fastpath *fp = fp_cookie;
1405
1406 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1407
1408 napi_schedule_irqoff(&fp->napi);
1409 return IRQ_HANDLED;
1410}
1411
1412/* Main transmit function */
1413netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1414{
1415 struct qede_dev *edev = netdev_priv(ndev);
1416 struct netdev_queue *netdev_txq;
1417 struct qede_tx_queue *txq;
1418 struct eth_tx_1st_bd *first_bd;
1419 struct eth_tx_2nd_bd *second_bd = NULL;
1420 struct eth_tx_3rd_bd *third_bd = NULL;
1421 struct eth_tx_bd *tx_data_bd = NULL;
1422 u16 txq_index;
1423 u8 nbd = 0;
1424 dma_addr_t mapping;
1425 int rc, frag_idx = 0, ipv6_ext = 0;
1426 u8 xmit_type;
1427 u16 idx;
1428 u16 hlen;
1429 bool data_split = false;
1430
1431 /* Get tx-queue context and netdev index */
1432 txq_index = skb_get_queue_mapping(skb);
1433 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
1434 txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
1435 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1436
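	/* The queue is stopped in advance once it can no longer hold a
	 * maximally fragmented skb, so there should always be room here.
	 */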
1437 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1438
1439 xmit_type = qede_xmit_type(skb, &ipv6_ext);
1440
1441#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1442 if (qede_pkt_req_lin(skb, xmit_type)) {
1443 if (skb_linearize(skb)) {
1444 DP_NOTICE(edev,
1445 "SKB linearization failed - silently dropping this SKB\n");
1446 dev_kfree_skb_any(skb);
1447 return NETDEV_TX_OK;
1448 }
1449 }
1450#endif
1451
1452 /* Fill the entry in the SW ring and the BDs in the FW ring */
1453 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
1454 txq->sw_tx_ring.skbs[idx].skb = skb;
1455 first_bd = (struct eth_tx_1st_bd *)
1456 qed_chain_produce(&txq->tx_pbl);
1457 memset(first_bd, 0, sizeof(*first_bd));
1458 first_bd->data.bd_flags.bitfields =
1459 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1460
1461	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1462 qede_ptp_tx_ts(edev, skb);
1463
1464	/* Map skb linear data for DMA and set in the first BD */
1465 mapping = dma_map_single(txq->dev, skb->data,
1466 skb_headlen(skb), DMA_TO_DEVICE);
1467 if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1468 DP_NOTICE(edev, "SKB mapping failed\n");
1469 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1470 qede_update_tx_producer(txq);
1471 return NETDEV_TX_OK;
1472 }
1473 nbd++;
1474 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1475
1476 /* In case there is IPv6 with extension headers or LSO we need 2nd and
1477 * 3rd BDs.
1478 */
1479 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1480 second_bd = (struct eth_tx_2nd_bd *)
1481 qed_chain_produce(&txq->tx_pbl);
1482 memset(second_bd, 0, sizeof(*second_bd));
1483
1484 nbd++;
1485 third_bd = (struct eth_tx_3rd_bd *)
1486 qed_chain_produce(&txq->tx_pbl);
1487 memset(third_bd, 0, sizeof(*third_bd));
1488
1489 nbd++;
1490 /* We need to fill in additional data in second_bd... */
1491 tx_data_bd = (struct eth_tx_bd *)second_bd;
1492 }
1493
1494 if (skb_vlan_tag_present(skb)) {
1495 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1496 first_bd->data.bd_flags.bitfields |=
1497 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1498 }
1499
1500 /* Fill the parsing flags & params according to the requested offload */
1501 if (xmit_type & XMIT_L4_CSUM) {
1502 /* We don't re-calculate IP checksum as it is already done by
1503 * the upper stack
1504 */
1505 first_bd->data.bd_flags.bitfields |=
1506 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1507
1508 if (xmit_type & XMIT_ENC) {
1509 first_bd->data.bd_flags.bitfields |=
1510 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1511 first_bd->data.bitfields |=
1512 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1513 }
1514
1515 /* Legacy FW had flipped behavior in regard to this bit -
1516 * I.e., needed to set to prevent FW from touching encapsulated
1517 * packets when it didn't need to.
1518 */
1519 if (unlikely(txq->is_legacy))
1520 first_bd->data.bitfields ^=
1521 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1522
1523 /* If the packet is IPv6 with extension header, indicate that
1524 * to FW and pass few params, since the device cracker doesn't
1525 * support parsing IPv6 with extension header/s.
1526 */
1527 if (unlikely(ipv6_ext))
1528 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1529 }
1530
1531 if (xmit_type & XMIT_LSO) {
1532 first_bd->data.bd_flags.bitfields |=
1533 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1534 third_bd->data.lso_mss =
1535 cpu_to_le16(skb_shinfo(skb)->gso_size);
1536
1537 if (unlikely(xmit_type & XMIT_ENC)) {
1538 first_bd->data.bd_flags.bitfields |=
1539 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1540
1541 if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1542 u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1543
1544 first_bd->data.bd_flags.bitfields |= 1 << tmp;
1545 }
1546 hlen = qede_get_skb_hlen(skb, true);
1547 } else {
1548 first_bd->data.bd_flags.bitfields |=
1549 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1550 hlen = qede_get_skb_hlen(skb, false);
1551 }
1552
1553 /* @@@TBD - if will not be removed need to check */
1554 third_bd->data.bitfields |=
1555 cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1556
1557 /* Make life easier for FW guys who can't deal with header and
1558 * data on same BD. If we need to split, use the second bd...
1559 */
1560 if (unlikely(skb_headlen(skb) > hlen)) {
1561 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1562 "TSO split header size is %d (%x:%x)\n",
1563 first_bd->nbytes, first_bd->addr.hi,
1564 first_bd->addr.lo);
1565
1566 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1567 le32_to_cpu(first_bd->addr.lo)) +
1568 hlen;
1569
1570 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1571 le16_to_cpu(first_bd->nbytes) -
1572 hlen);
1573
1574 /* this marks the BD as one that has no
1575 * individual mapping
1576 */
1577 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1578
1579 first_bd->nbytes = cpu_to_le16(hlen);
1580
1581 tx_data_bd = (struct eth_tx_bd *)third_bd;
1582 data_split = true;
1583 }
1584 } else {
1585 first_bd->data.bitfields |=
1586 (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
1587 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1588 }
1589
1590 /* Handle fragmented skb */
1591 /* special handle for frags inside 2nd and 3rd bds.. */
1592 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1593 rc = map_frag_to_bd(txq,
1594 &skb_shinfo(skb)->frags[frag_idx],
1595 tx_data_bd);
1596 if (rc) {
1597 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1598 qede_update_tx_producer(txq);
1599 return NETDEV_TX_OK;
1600 }
1601
1602 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1603 tx_data_bd = (struct eth_tx_bd *)third_bd;
1604 else
1605 tx_data_bd = NULL;
1606
1607 frag_idx++;
1608 }
1609
1610 /* map last frags into 4th, 5th .... */
1611 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1612 tx_data_bd = (struct eth_tx_bd *)
1613 qed_chain_produce(&txq->tx_pbl);
1614
1615 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1616
1617 rc = map_frag_to_bd(txq,
1618 &skb_shinfo(skb)->frags[frag_idx],
1619 tx_data_bd);
1620 if (rc) {
1621 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1622 qede_update_tx_producer(txq);
1623 return NETDEV_TX_OK;
1624 }
1625 }
1626
1627 /* update the first BD with the actual num BDs */
1628 first_bd->data.nbds = nbd;
1629
1630 netdev_tx_sent_queue(netdev_txq, skb->len);
1631
1632 skb_tx_timestamp(skb);
1633
1634 /* Advance packet producer only before sending the packet since mapping
1635 * of pages may fail.
1636 */
1637 txq->sw_tx_prod++;
1638
1639 /* 'next page' entries are counted in the producer value */
1640 txq->tx_db.data.bd_prod =
1641 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1642
1643 if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
1644 qede_update_tx_producer(txq);
1645
1646 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1647 < (MAX_SKB_FRAGS + 1))) {
1648 if (skb->xmit_more)
1649 qede_update_tx_producer(txq);
1650
1651 netif_tx_stop_queue(netdev_txq);
1652 txq->stopped_cnt++;
1653 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1654 "Stop queue was called\n");
1655 /* paired memory barrier is in qede_tx_int(), we have to keep
1656 * ordering of set_bit() in netif_tx_stop_queue() and read of
1657 * fp->bd_tx_cons
1658 */
1659 smp_mb();
1660
1661 if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1662 (MAX_SKB_FRAGS + 1)) &&
1663 (edev->state == QEDE_STATE_OPEN)) {
1664 netif_tx_wake_queue(netdev_txq);
1665 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1666 "Wake queue was called\n");
1667 }
1668 }
1669
1670 return NETDEV_TX_OK;
1671}
1672
1673/* 8B udp header + 8B base tunnel header + 32B option length */
1674#define QEDE_MAX_TUN_HDR_LEN 48
1675
1676netdev_features_t qede_features_check(struct sk_buff *skb,
1677 struct net_device *dev,
1678 netdev_features_t features)
1679{
1680 if (skb->encapsulation) {
1681 u8 l4_proto = 0;
1682
1683 switch (vlan_get_protocol(skb)) {
1684 case htons(ETH_P_IP):
1685 l4_proto = ip_hdr(skb)->protocol;
1686 break;
1687 case htons(ETH_P_IPV6):
1688 l4_proto = ipv6_hdr(skb)->nexthdr;
1689 break;
1690 default:
1691 return features;
1692 }
1693
1694 /* Disable offloads for geneve tunnels, as HW can't parse
1695 * the geneve header which has option length greater than 32B.
1696 */
1697 if ((l4_proto == IPPROTO_UDP) &&
1698 ((skb_inner_mac_header(skb) -
1699 skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
1700 return features & ~(NETIF_F_CSUM_MASK |
1701 NETIF_F_GSO_MASK);
1702 }
1703
1704 return features;
1705}