1/* QLogic qede NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
 20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/bpf_trace.h>
36#include <net/udp_tunnel.h>
37#include <linux/ip.h>
38#include <net/ipv6.h>
39#include <net/tcp.h>
40#include <linux/if_ether.h>
41#include <linux/if_vlan.h>
42#include <net/ip6_checksum.h>
43#include "qede_ptp.h"
44
45#include <linux/qed/qed_if.h>
46#include "qede.h"
47/*********************************
48 * Content also used by slowpath *
49 *********************************/
50
51int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
52{
53 struct sw_rx_data *sw_rx_data;
54 struct eth_rx_bd *rx_bd;
55 dma_addr_t mapping;
56 struct page *data;
57
58	/* In case lazy-allocation is allowed, postpone allocation until the
59 * end of the NAPI run. We'd still need to make sure the Rx ring has
60 * sufficient buffers to guarantee an additional Rx interrupt.
61 */
62 if (allow_lazy && likely(rxq->filled_buffers > 12)) {
63 rxq->filled_buffers--;
64 return 0;
65 }
66
67	data = alloc_pages(GFP_ATOMIC, 0);
68 if (unlikely(!data))
69 return -ENOMEM;
70
71 /* Map the entire page as it would be used
72 * for multiple RX buffer segment size mapping.
73 */
74 mapping = dma_map_page(rxq->dev, data, 0,
75 PAGE_SIZE, rxq->data_direction);
76 if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
77 __free_page(data);
78 return -ENOMEM;
79 }
80
81 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
82 sw_rx_data->page_offset = 0;
83 sw_rx_data->data = data;
84 sw_rx_data->mapping = mapping;
85
86 /* Advance PROD and get BD pointer */
87 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
88 WARN_ON(!rx_bd);
89 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
90	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
91				     rxq->rx_headroom);
92
93 rxq->sw_rx_prod++;
94	rxq->filled_buffers++;
95
96 return 0;
97}
98
99/* Unmap the data and free skb */
100int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
101{
102	u16 idx = txq->sw_tx_cons;
103	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
104 struct eth_tx_1st_bd *first_bd;
105 struct eth_tx_bd *tx_data_bd;
106 int bds_consumed = 0;
107 int nbds;
108 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
109 int i, split_bd_len = 0;
110
111 if (unlikely(!skb)) {
112 DP_ERR(edev,
113 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
114 idx, txq->sw_tx_cons, txq->sw_tx_prod);
115 return -1;
116 }
117
118 *len = skb->len;
119
120 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
121
122 bds_consumed++;
123
124 nbds = first_bd->data.nbds;
125
126 if (data_split) {
127 struct eth_tx_bd *split = (struct eth_tx_bd *)
128 qed_chain_consume(&txq->tx_pbl);
129 split_bd_len = BD_UNMAP_LEN(split);
130 bds_consumed++;
131 }
132 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
133 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
134
135 /* Unmap the data of the skb frags */
136 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
137 tx_data_bd = (struct eth_tx_bd *)
138 qed_chain_consume(&txq->tx_pbl);
139 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
140 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
141 }
142
143 while (bds_consumed++ < nbds)
144 qed_chain_consume(&txq->tx_pbl);
145
146 /* Free skb */
147 dev_kfree_skb_any(skb);
148 txq->sw_tx_ring.skbs[idx].skb = NULL;
149 txq->sw_tx_ring.skbs[idx].flags = 0;
150
151 return 0;
152}
153
154/* Unmap the data and free skb when mapping failed during start_xmit */
155static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
156 struct eth_tx_1st_bd *first_bd,
157 int nbd, bool data_split)
158{
159	u16 idx = txq->sw_tx_prod;
160	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
161 struct eth_tx_bd *tx_data_bd;
162 int i, split_bd_len = 0;
163
164 /* Return prod to its position before this skb was handled */
165 qed_chain_set_prod(&txq->tx_pbl,
166 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
167
168 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
169
170 if (data_split) {
171 struct eth_tx_bd *split = (struct eth_tx_bd *)
172 qed_chain_produce(&txq->tx_pbl);
173 split_bd_len = BD_UNMAP_LEN(split);
174 nbd--;
175 }
176
177 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
178 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
179
180 /* Unmap the data of the skb frags */
181 for (i = 0; i < nbd; i++) {
182 tx_data_bd = (struct eth_tx_bd *)
183 qed_chain_produce(&txq->tx_pbl);
184 if (tx_data_bd->nbytes)
185 dma_unmap_page(txq->dev,
186 BD_UNMAP_ADDR(tx_data_bd),
187 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
188 }
189
190 /* Return again prod to its position before this skb was handled */
191 qed_chain_set_prod(&txq->tx_pbl,
192 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
193
194 /* Free skb */
195 dev_kfree_skb_any(skb);
196 txq->sw_tx_ring.skbs[idx].skb = NULL;
197 txq->sw_tx_ring.skbs[idx].flags = 0;
198}
199
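/* Classify the skb for transmit: returns a bitmask of XMIT_* flags describing
 * the offloads needed (L4 checksum, LSO, tunnel encapsulation) and flags IPv6
 * extension headers through *ipv6_ext.
 */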
200static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
201{
202 u32 rc = XMIT_L4_CSUM;
203 __be16 l3_proto;
204
205 if (skb->ip_summed != CHECKSUM_PARTIAL)
206 return XMIT_PLAIN;
207
208 l3_proto = vlan_get_protocol(skb);
209 if (l3_proto == htons(ETH_P_IPV6) &&
210 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
211 *ipv6_ext = 1;
212
213 if (skb->encapsulation) {
214 rc |= XMIT_ENC;
215 if (skb_is_gso(skb)) {
216 unsigned short gso_type = skb_shinfo(skb)->gso_type;
217
218 if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
219 (gso_type & SKB_GSO_GRE_CSUM))
220 rc |= XMIT_ENC_GSO_L4_CSUM;
221
222 rc |= XMIT_LSO;
223 return rc;
224 }
225 }
226
227 if (skb_is_gso(skb))
228 rc |= XMIT_LSO;
229
230 return rc;
231}
232
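/* Fill the 2nd/3rd BD parsing fields the FW needs when offloading an IPv6
 * packet that carries extension headers.
 */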
233static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
234 struct eth_tx_2nd_bd *second_bd,
235 struct eth_tx_3rd_bd *third_bd)
236{
237 u8 l4_proto;
238 u16 bd2_bits1 = 0, bd2_bits2 = 0;
239
240 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
241
242 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
243 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
244 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
245
246 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
247 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
248
249 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
250 l4_proto = ipv6_hdr(skb)->nexthdr;
251 else
252 l4_proto = ip_hdr(skb)->protocol;
253
254 if (l4_proto == IPPROTO_UDP)
255 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
256
257 if (third_bd)
258 third_bd->data.bitfields |=
259 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
260 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
261 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
262
263 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
264 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
265}
266
267static int map_frag_to_bd(struct qede_tx_queue *txq,
268 skb_frag_t *frag, struct eth_tx_bd *bd)
269{
270 dma_addr_t mapping;
271
272 /* Map skb non-linear frag data for DMA */
273 mapping = skb_frag_dma_map(txq->dev, frag, 0,
274 skb_frag_size(frag), DMA_TO_DEVICE);
275 if (unlikely(dma_mapping_error(txq->dev, mapping)))
276 return -ENOMEM;
277
278 /* Setup the data pointer of the frag data */
279 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
280
281 return 0;
282}
283
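/* Return the header length - up to and including the (inner) TCP header -
 * used to separate headers from payload for LSO.
 */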
284static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
285{
286 if (is_encap_pkt)
287 return (skb_inner_transport_header(skb) +
288 inner_tcp_hdrlen(skb) - skb->data);
289 else
290 return (skb_transport_header(skb) +
291 tcp_hdrlen(skb) - skb->data);
292}
293
294/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
295#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
296static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
297{
298 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
299
300 if (xmit_type & XMIT_LSO) {
301 int hlen;
302
303 hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
304
305 /* linear payload would require its own BD */
306 if (skb_headlen(skb) > hlen)
307 allowed_frags--;
308 }
309
310 return (skb_shinfo(skb)->nr_frags > allowed_frags);
311}
312#endif
313
314static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
315{
316 /* wmb makes sure that the BDs data is updated before updating the
317 * producer, otherwise FW may read old data from the BDs.
318 */
319 wmb();
320 barrier();
321 writel(txq->tx_db.raw, txq->doorbell_addr);
322
323	/* Fence required to flush the write combined buffer, since another
324	 * CPU may write to the same doorbell address and data may be lost
325	 * due to relaxed order nature of write combined bar.
326	 */
327	wmb();
328}
329
330static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
331 struct sw_rx_data *metadata, u16 padding, u16 length)
332{
333 struct qede_tx_queue *txq = fp->xdp_tx;
334	struct eth_tx_1st_bd *first_bd;
335	u16 idx = txq->sw_tx_prod;
336	u16 val;
337
338 if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
339 txq->stopped_cnt++;
340 return -ENOMEM;
341 }
342
343 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
344
345 memset(first_bd, 0, sizeof(*first_bd));
346 first_bd->data.bd_flags.bitfields =
347 BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
348
349 val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
350 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
351
352 first_bd->data.bitfields |= cpu_to_le16(val);
353	first_bd->data.nbds = 1;
354
355 /* We can safely ignore the offset, as it's 0 for XDP */
356 BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
357
358 /* Synchronize the buffer back to device, as program [probably]
359 * has changed it.
360 */
361 dma_sync_single_for_device(&edev->pdev->dev,
362 metadata->mapping + padding,
363 length, PCI_DMA_TODEVICE);
364
365	txq->sw_tx_ring.xdp[idx].page = metadata->data;
366	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
367	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
368
369 /* Mark the fastpath for future XDP doorbell */
370 fp->xdp_xmit = 1;
371
372 return 0;
373}
374
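/* Check whether the FW reported Tx completions that still need to be reaped */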
375int qede_txq_has_work(struct qede_tx_queue *txq)
376{
377 u16 hw_bd_cons;
378
379 /* Tell compiler that consumer and producer can change */
380 barrier();
381 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
382 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
383 return 0;
384
385 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
386}
387
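/* Reap completed XDP transmissions - unmap and free the forwarded pages */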
388static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
389{
390	u16 hw_bd_cons, idx;
391
392 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
393 barrier();
394
395 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
396		qed_chain_consume(&txq->tx_pbl);
397		idx = txq->sw_tx_cons;
398
399		dma_unmap_page(&edev->pdev->dev,
400 txq->sw_tx_ring.xdp[idx].mapping,
401 PAGE_SIZE, DMA_BIDIRECTIONAL);
402 __free_page(txq->sw_tx_ring.xdp[idx].page);
403
404		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
405		txq->xmit_pkts++;
406 }
407}
408
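/* Reap completed Tx packets, update BQL accounting and wake the Tx queue
 * if it was stopped and has room again.
 */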
409static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
410{
411 struct netdev_queue *netdev_txq;
412 u16 hw_bd_cons;
413 unsigned int pkts_compl = 0, bytes_compl = 0;
414 int rc;
415
416 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
417
418 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
419 barrier();
420
421 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
422 int len = 0;
423
424 rc = qede_free_tx_pkt(edev, txq, &len);
425 if (rc) {
426 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
427 hw_bd_cons,
428 qed_chain_get_cons_idx(&txq->tx_pbl));
429 break;
430 }
431
432 bytes_compl += len;
433 pkts_compl++;
434		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
435		txq->xmit_pkts++;
436 }
437
438 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
439
440 /* Need to make the tx_bd_cons update visible to start_xmit()
441 * before checking for netif_tx_queue_stopped(). Without the
442 * memory barrier, there is a small possibility that
443 * start_xmit() will miss it and cause the queue to be stopped
444 * forever.
445 * On the other hand we need an rmb() here to ensure the proper
446 * ordering of bit testing in the following
447 * netif_tx_queue_stopped(txq) call.
448 */
449 smp_mb();
450
451 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
452 /* Taking tx_lock is needed to prevent reenabling the queue
 453		 * while it's empty. This could have happened if rx_action() gets
454 * suspended in qede_tx_int() after the condition before
455 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
456 *
457 * stops the queue->sees fresh tx_bd_cons->releases the queue->
458 * sends some packets consuming the whole queue again->
459 * stops the queue
460 */
461
462 __netif_tx_lock(netdev_txq, smp_processor_id());
463
464 if ((netif_tx_queue_stopped(netdev_txq)) &&
465 (edev->state == QEDE_STATE_OPEN) &&
466 (qed_chain_get_elem_left(&txq->tx_pbl)
467 >= (MAX_SKB_FRAGS + 1))) {
468 netif_tx_wake_queue(netdev_txq);
469 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
470 "Wake queue was called\n");
471 }
472
473 __netif_tx_unlock(netdev_txq);
474 }
475
476 return 0;
477}
478
479bool qede_has_rx_work(struct qede_rx_queue *rxq)
480{
481 u16 hw_comp_cons, sw_comp_cons;
482
483 /* Tell compiler that status block fields can change */
484 barrier();
485
486 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
487 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
488
489 return hw_comp_cons != sw_comp_cons;
490}
491
492static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
493{
494 qed_chain_consume(&rxq->rx_bd_ring);
495 rxq->sw_rx_cons++;
496}
497
498/* This function reuses the buffer(from an offset) from
499 * consumer index to producer index in the bd ring
500 */
501static inline void qede_reuse_page(struct qede_rx_queue *rxq,
502 struct sw_rx_data *curr_cons)
503{
504 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
505 struct sw_rx_data *curr_prod;
506 dma_addr_t new_mapping;
507
508 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
509 *curr_prod = *curr_cons;
510
511 new_mapping = curr_prod->mapping + curr_prod->page_offset;
512
513 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
514	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
515					  rxq->rx_headroom);
516
517 rxq->sw_rx_prod++;
518 curr_cons->data = NULL;
519}
520
521/* In case of allocation failures reuse buffers
522 * from consumer index to produce buffers for firmware
523 */
524void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
525{
526 struct sw_rx_data *curr_cons;
527
528 for (; count > 0; count--) {
529 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
530 qede_reuse_page(rxq, curr_cons);
531 qede_rx_bd_ring_consume(rxq);
532 }
533}
534
535static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
536 struct sw_rx_data *curr_cons)
537{
538 /* Move to the next segment in the page */
539 curr_cons->page_offset += rxq->rx_buf_seg_size;
540
541 if (curr_cons->page_offset == PAGE_SIZE) {
542		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
543			/* Since we failed to allocate new buffer
544 * current buffer can be used again.
545 */
546 curr_cons->page_offset -= rxq->rx_buf_seg_size;
547
548 return -ENOMEM;
549 }
550
551 dma_unmap_page(rxq->dev, curr_cons->mapping,
552 PAGE_SIZE, rxq->data_direction);
553 } else {
554 /* Increment refcount of the page as we don't want
555 * network stack to take the ownership of the page
556 * which can be recycled multiple times by the driver.
557 */
558 page_ref_inc(curr_cons->data);
559 qede_reuse_page(rxq, curr_cons);
560 }
561
562 return 0;
563}
564
565void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
566{
567 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
568 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
569 struct eth_rx_prod_data rx_prods = {0};
570
571 /* Update producers */
572 rx_prods.bd_prod = cpu_to_le16(bd_prod);
573 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
574
575 /* Make sure that the BD and SGE data is updated before updating the
576 * producers since FW might read the BD/SGE right after the producer
577 * is updated.
578 */
579 wmb();
580
581 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
582 (u32 *)&rx_prods);
583
584 /* mmiowb is needed to synchronize doorbell writes from more than one
585 * processor. It guarantees that the write arrives to the device before
586 * the napi lock is released and another qede_poll is called (possibly
587 * on another CPU). Without this barrier, the next doorbell can bypass
588 * this doorbell. This is applicable to IA64/Altix systems.
589 */
590 mmiowb();
591}
592
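/* Set the skb RSS hash and hash type from the CQE fields */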
593static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
594{
595 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
596 enum rss_hash_type htype;
597 u32 hash = 0;
598
599 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
600 if (htype) {
601 hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
602 (htype == RSS_HASH_TYPE_IPV6)) ?
603 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
604 hash = le32_to_cpu(rss_hash);
605 }
606 skb_set_hash(skb, hash, hash_type);
607}
608
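/* Translate the driver's checksum verdict into skb->ip_summed/csum_level */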
609static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
610{
611 skb_checksum_none_assert(skb);
612
613 if (csum_flag & QEDE_CSUM_UNNECESSARY)
614 skb->ip_summed = CHECKSUM_UNNECESSARY;
615
616	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
617		skb->csum_level = 1;
618		skb->encapsulation = 1;
619	}
620}
621
622static inline void qede_skb_receive(struct qede_dev *edev,
623 struct qede_fastpath *fp,
624 struct qede_rx_queue *rxq,
625 struct sk_buff *skb, u16 vlan_tag)
626{
627 if (vlan_tag)
628 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
629
630 napi_gro_receive(&fp->napi, skb);
631}
632
633static void qede_set_gro_params(struct qede_dev *edev,
634 struct sk_buff *skb,
635 struct eth_fast_path_rx_tpa_start_cqe *cqe)
636{
637 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
638
639 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
640 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
641 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
642 else
643 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
644
645 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
646 cqe->header_len;
647}
648
649static int qede_fill_frag_skb(struct qede_dev *edev,
650 struct qede_rx_queue *rxq,
651 u8 tpa_agg_index, u16 len_on_bd)
652{
653 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
654 NUM_RX_BDS_MAX];
655 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
656 struct sk_buff *skb = tpa_info->skb;
657
658 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
659 goto out;
660
661 /* Add one frag and update the appropriate fields in the skb */
662 skb_fill_page_desc(skb, tpa_info->frag_id++,
663			   current_bd->data,
664			   current_bd->page_offset + rxq->rx_headroom,
665			   len_on_bd);
666
667 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
668 /* Incr page ref count to reuse on allocation failure
669 * so that it doesn't get freed while freeing SKB.
670 */
671 page_ref_inc(current_bd->data);
672 goto out;
673 }
674
675	qede_rx_bd_ring_consume(rxq);
676
677 skb->data_len += len_on_bd;
678 skb->truesize += rxq->rx_buf_seg_size;
679 skb->len += len_on_bd;
680
681 return 0;
682
683out:
684 tpa_info->state = QEDE_AGG_STATE_ERROR;
685 qede_recycle_rx_bd_ring(rxq, 1);
686
687 return -ENOMEM;
688}
689
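/* Check the CQE parsing flags to see whether the packet arrived over a tunnel */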
690static bool qede_tunn_exist(u16 flag)
691{
692 return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
693 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
694}
695
696static u8 qede_check_tunn_csum(u16 flag)
697{
698 u16 csum_flag = 0;
699 u8 tcsum = 0;
700
701 if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
702 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
703 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
704 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
705
706 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
707 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
708 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
709 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
710 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
711 }
712
713 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
714 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
715 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
716 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
717
718 if (csum_flag & flag)
719 return QEDE_CSUM_ERROR;
720
721 return QEDE_CSUM_UNNECESSARY | tcsum;
722}
723
724static inline struct sk_buff *
725qede_build_skb(struct qede_rx_queue *rxq,
726 struct sw_rx_data *bd, u16 len, u16 pad)
727{
728 struct sk_buff *skb;
729 void *buf;
730
731 buf = page_address(bd->data) + bd->page_offset;
732 skb = build_skb(buf, rxq->rx_buf_seg_size);
733
734 skb_reserve(skb, pad);
735 skb_put(skb, len);
736
737 return skb;
738}
739
740static struct sk_buff *
741qede_tpa_rx_build_skb(struct qede_dev *edev,
742 struct qede_rx_queue *rxq,
743 struct sw_rx_data *bd, u16 len, u16 pad,
744 bool alloc_skb)
745{
746 struct sk_buff *skb;
747
748 skb = qede_build_skb(rxq, bd, len, pad);
749 bd->page_offset += rxq->rx_buf_seg_size;
750
751 if (bd->page_offset == PAGE_SIZE) {
752 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
753 DP_NOTICE(edev,
754 "Failed to allocate RX buffer for tpa start\n");
755 bd->page_offset -= rxq->rx_buf_seg_size;
756 page_ref_inc(bd->data);
757 dev_kfree_skb_any(skb);
758 return NULL;
759 }
760 } else {
761 page_ref_inc(bd->data);
762 qede_reuse_page(rxq, bd);
763 }
764
765 /* We've consumed the first BD and prepared an SKB */
766 qede_rx_bd_ring_consume(rxq);
767
768 return skb;
769}
770
771static struct sk_buff *
772qede_rx_build_skb(struct qede_dev *edev,
773 struct qede_rx_queue *rxq,
774 struct sw_rx_data *bd, u16 len, u16 pad)
775{
776 struct sk_buff *skb = NULL;
777
778 /* For smaller frames still need to allocate skb, memcpy
779 * data and benefit in reusing the page segment instead of
780 * un-mapping it.
781 */
782 if ((len + pad <= edev->rx_copybreak)) {
783 unsigned int offset = bd->page_offset + pad;
784
785 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
786 if (unlikely(!skb))
787 return NULL;
788
789 skb_reserve(skb, pad);
790 memcpy(skb_put(skb, len),
791 page_address(bd->data) + offset, len);
792 qede_reuse_page(rxq, bd);
793 goto out;
794 }
795
796 skb = qede_build_skb(rxq, bd, len, pad);
797
798 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
799 /* Incr page ref count to reuse on allocation failure so
800 * that it doesn't get freed while freeing SKB [as its
801 * already mapped there].
802 */
803 page_ref_inc(bd->data);
804 dev_kfree_skb_any(skb);
805 return NULL;
806 }
807out:
808 /* We've consumed the first BD and prepared an SKB */
809 qede_rx_bd_ring_consume(rxq);
810
811 return skb;
812}
813
814static void qede_tpa_start(struct qede_dev *edev,
815 struct qede_rx_queue *rxq,
816 struct eth_fast_path_rx_tpa_start_cqe *cqe)
817{
818 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
819	struct sw_rx_data *sw_rx_data_cons;
820	u16 pad;
821
822	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
823	pad = cqe->placement_offset + rxq->rx_headroom;
824
825	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
826 le16_to_cpu(cqe->len_on_first_bd),
827 pad, false);
828 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
829 tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
830
831	if (unlikely(!tpa_info->skb)) {
832		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
833
834 /* Consume from ring but do not produce since
835 * this might be used by FW still, it will be re-used
836 * at TPA end.
837 */
838 tpa_info->tpa_start_fail = true;
839 qede_rx_bd_ring_consume(rxq);
840		tpa_info->state = QEDE_AGG_STATE_ERROR;
841 goto cons_buf;
842 }
843
844	tpa_info->frag_id = 0;
845 tpa_info->state = QEDE_AGG_STATE_START;
846
847	if ((le16_to_cpu(cqe->pars_flags.flags) >>
848 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
849 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
850 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
851 else
852 tpa_info->vlan_tag = 0;
853
854 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
855
856 /* This is needed in order to enable forwarding support */
857 qede_set_gro_params(edev, tpa_info->skb, cqe);
858
859cons_buf: /* We still need to handle bd_len_list to consume buffers */
860 if (likely(cqe->ext_bd_len_list[0]))
861 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
862 le16_to_cpu(cqe->ext_bd_len_list[0]));
863
864 if (unlikely(cqe->ext_bd_len_list[1])) {
865 DP_ERR(edev,
866 "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
867 tpa_info->state = QEDE_AGG_STATE_ERROR;
868 }
869}
870
871#ifdef CONFIG_INET
872static void qede_gro_ip_csum(struct sk_buff *skb)
873{
874 const struct iphdr *iph = ip_hdr(skb);
875 struct tcphdr *th;
876
877 skb_set_transport_header(skb, sizeof(struct iphdr));
878 th = tcp_hdr(skb);
879
880 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
881 iph->saddr, iph->daddr, 0);
882
883 tcp_gro_complete(skb);
884}
885
886static void qede_gro_ipv6_csum(struct sk_buff *skb)
887{
888 struct ipv6hdr *iph = ipv6_hdr(skb);
889 struct tcphdr *th;
890
891 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
892 th = tcp_hdr(skb);
893
894 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
895 &iph->saddr, &iph->daddr, 0);
896 tcp_gro_complete(skb);
897}
898#endif
899
900static void qede_gro_receive(struct qede_dev *edev,
901 struct qede_fastpath *fp,
902 struct sk_buff *skb,
903 u16 vlan_tag)
904{
905 /* FW can send a single MTU sized packet from gro flow
906 * due to aggregation timeout/last segment etc. which
907 * is not expected to be a gro packet. If a skb has zero
908 * frags then simply push it in the stack as non gso skb.
909 */
910 if (unlikely(!skb->data_len)) {
911 skb_shinfo(skb)->gso_type = 0;
912 skb_shinfo(skb)->gso_size = 0;
913 goto send_skb;
914 }
915
916#ifdef CONFIG_INET
917 if (skb_shinfo(skb)->gso_size) {
918 skb_reset_network_header(skb);
919
920 switch (skb->protocol) {
921 case htons(ETH_P_IP):
922 qede_gro_ip_csum(skb);
923 break;
924 case htons(ETH_P_IPV6):
925 qede_gro_ipv6_csum(skb);
926 break;
927 default:
928 DP_ERR(edev,
929 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
930 ntohs(skb->protocol));
931 }
932 }
933#endif
934
935send_skb:
936 skb_record_rx_queue(skb, fp->rxq->rxq_id);
937 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
938}
939
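/* A TPA continuation CQE reports additional buffers - add them as frags to
 * the aggregated skb.
 */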
940static inline void qede_tpa_cont(struct qede_dev *edev,
941 struct qede_rx_queue *rxq,
942 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
943{
944 int i;
945
946 for (i = 0; cqe->len_list[i]; i++)
947 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
948 le16_to_cpu(cqe->len_list[i]));
949
950 if (unlikely(i > 1))
951 DP_ERR(edev,
952 "Strange - TPA cont with more than a single len_list entry\n");
953}
954
955static int qede_tpa_end(struct qede_dev *edev,
956			struct qede_fastpath *fp,
957			struct eth_fast_path_rx_tpa_end_cqe *cqe)
958{
959 struct qede_rx_queue *rxq = fp->rxq;
960 struct qede_agg_info *tpa_info;
961 struct sk_buff *skb;
962 int i;
963
964 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
965 skb = tpa_info->skb;
966
967	if (tpa_info->buffer.page_offset == PAGE_SIZE)
968 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
969 PAGE_SIZE, rxq->data_direction);
970
971	for (i = 0; cqe->len_list[i]; i++)
972 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
973 le16_to_cpu(cqe->len_list[i]));
974 if (unlikely(i > 1))
975 DP_ERR(edev,
976 "Strange - TPA emd with more than a single len_list entry\n");
977
978 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
979 goto err;
980
981 /* Sanity */
982 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
983 DP_ERR(edev,
984 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
985 cqe->num_of_bds, tpa_info->frag_id);
986 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
987 DP_ERR(edev,
988 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
989 le16_to_cpu(cqe->total_packet_len), skb->len);
990
991	/* Finalize the SKB */
992 skb->protocol = eth_type_trans(skb, edev->ndev);
993 skb->ip_summed = CHECKSUM_UNNECESSARY;
994
995 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
996 * to skb_shinfo(skb)->gso_segs
997 */
998 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
999
1000 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1001
1002 tpa_info->state = QEDE_AGG_STATE_NONE;
1003
1004	return 1;
1005err:
1006	tpa_info->state = QEDE_AGG_STATE_NONE;
1007
1008 if (tpa_info->tpa_start_fail) {
1009 qede_reuse_page(rxq, &tpa_info->buffer);
1010 tpa_info->tpa_start_fail = false;
1011 }
1012
1013	dev_kfree_skb_any(tpa_info->skb);
1014	tpa_info->skb = NULL;
1015	return 0;
1016}
1017
1018static u8 qede_check_notunn_csum(u16 flag)
1019{
1020 u16 csum_flag = 0;
1021 u8 csum = 0;
1022
1023 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1024 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
1025 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1026 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1027 csum = QEDE_CSUM_UNNECESSARY;
1028 }
1029
1030 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1031 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1032
1033 if (csum_flag & flag)
1034 return QEDE_CSUM_ERROR;
1035
1036 return csum;
1037}
1038
1039static u8 qede_check_csum(u16 flag)
1040{
1041 if (!qede_tunn_exist(flag))
1042 return qede_check_notunn_csum(flag);
1043 else
1044 return qede_check_tunn_csum(flag);
1045}
1046
1047static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
1048 u16 flag)
1049{
1050 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
1051
1052 if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
1053 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
1054 (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1055 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
1056 return true;
1057
1058 return false;
1059}
1060
1061/* Return true iff packet is to be passed to stack */
1062static bool qede_rx_xdp(struct qede_dev *edev,
1063 struct qede_fastpath *fp,
1064 struct qede_rx_queue *rxq,
1065 struct bpf_prog *prog,
1066 struct sw_rx_data *bd,
1067			struct eth_fast_path_rx_reg_cqe *cqe,
1068			u16 *data_offset, u16 *len)
1069{
1070	struct xdp_buff xdp;
1071	enum xdp_action act;
1072
1073	xdp.data_hard_start = page_address(bd->data);
1074	xdp.data = xdp.data_hard_start + *data_offset;
1075	xdp_set_data_meta_invalid(&xdp);
1076	xdp.data_end = xdp.data + *len;
1077	xdp.rxq = &rxq->xdp_rxq;
1078
1079 /* Queues always have a full reset currently, so for the time
1080 * being until there's atomic program replace just mark read
1081 * side for map helpers.
1082 */
1083 rcu_read_lock();
1084 act = bpf_prog_run_xdp(prog, &xdp);
1085 rcu_read_unlock();
1086
1087	/* Recalculate, as XDP might have changed the headers */
1088 *data_offset = xdp.data - xdp.data_hard_start;
1089 *len = xdp.data_end - xdp.data;
1090
1091	if (act == XDP_PASS)
1092 return true;
1093
1094 /* Count number of packets not to be passed to stack */
1095 rxq->xdp_no_pass++;
1096
1097 switch (act) {
1098 case XDP_TX:
1099 /* We need the replacement buffer before transmit. */
1100		if (qede_alloc_rx_buffer(rxq, true)) {
1101			qede_recycle_rx_bd_ring(rxq, 1);
1102			trace_xdp_exception(edev->ndev, prog, act);
1103			return false;
1104 }
1105
1106 /* Now if there's a transmission problem, we'd still have to
1107 * throw current buffer, as replacement was already allocated.
1108 */
1109		if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
1110			dma_unmap_page(rxq->dev, bd->mapping,
1111				       PAGE_SIZE, DMA_BIDIRECTIONAL);
1112			__free_page(bd->data);
1113			trace_xdp_exception(edev->ndev, prog, act);
1114		}
1115
1116 /* Regardless, we've consumed an Rx BD */
1117 qede_rx_bd_ring_consume(rxq);
1118 return false;
1119
1120 default:
1121 bpf_warn_invalid_xdp_action(act);
1122 case XDP_ABORTED:
1123		trace_xdp_exception(edev->ndev, prog, act);
1124	case XDP_DROP:
1125 qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1126 }
1127
1128 return false;
1129}
1130
1131static int qede_rx_build_jumbo(struct qede_dev *edev,
1132 struct qede_rx_queue *rxq,
1133 struct sk_buff *skb,
1134 struct eth_fast_path_rx_reg_cqe *cqe,
1135 u16 first_bd_len)
1136{
1137 u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1138 struct sw_rx_data *bd;
1139 u16 bd_cons_idx;
1140 u8 num_frags;
1141
1142 pkt_len -= first_bd_len;
1143
1144 /* We've already used one BD for the SKB. Now take care of the rest */
1145 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1146 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1147 pkt_len;
1148
1149 if (unlikely(!cur_size)) {
1150 DP_ERR(edev,
1151 "Still got %d BDs for mapping jumbo, but length became 0\n",
1152 num_frags);
1153 goto out;
1154 }
1155
1156 /* We need a replacement buffer for each BD */
1157		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1158			goto out;
1159
1160 /* Now that we've allocated the replacement buffer,
1161 * we can safely consume the next BD and map it to the SKB.
1162 */
1163 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1164 bd = &rxq->sw_rx_ring[bd_cons_idx];
1165 qede_rx_bd_ring_consume(rxq);
1166
1167 dma_unmap_page(rxq->dev, bd->mapping,
1168 PAGE_SIZE, DMA_FROM_DEVICE);
1169
1170 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
1171				   bd->data, rxq->rx_headroom, cur_size);
1172
1173 skb->truesize += PAGE_SIZE;
1174 skb->data_len += cur_size;
1175 skb->len += cur_size;
1176 pkt_len -= cur_size;
1177 }
1178
1179 if (unlikely(pkt_len))
1180 DP_ERR(edev,
1181 "Mapped all BDs of jumbo, but still have %d bytes\n",
1182 pkt_len);
1183
1184out:
1185 return num_frags;
1186}
1187
1188static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1189 struct qede_fastpath *fp,
1190 struct qede_rx_queue *rxq,
1191 union eth_rx_cqe *cqe,
1192 enum eth_rx_cqe_type type)
1193{
1194 switch (type) {
1195 case ETH_RX_CQE_TYPE_TPA_START:
1196 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1197 return 0;
1198 case ETH_RX_CQE_TYPE_TPA_CONT:
1199 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1200 return 0;
1201 case ETH_RX_CQE_TYPE_TPA_END:
1202		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1203	default:
1204 return 0;
1205 }
1206}
1207
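/* Process a single Rx CQE - dispatch slowpath and TPA completions, run XDP
 * if a program is attached, validate checksums and build an skb for the
 * stack. Returns 1 when a packet is passed up, 0 otherwise.
 */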
1208static int qede_rx_process_cqe(struct qede_dev *edev,
1209 struct qede_fastpath *fp,
1210 struct qede_rx_queue *rxq)
1211{
1212 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1213 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1214 u16 len, pad, bd_cons_idx, parse_flag;
1215 enum eth_rx_cqe_type cqe_type;
1216 union eth_rx_cqe *cqe;
1217 struct sw_rx_data *bd;
1218 struct sk_buff *skb;
1219 __le16 flags;
1220 u8 csum_flag;
1221
1222 /* Get the CQE from the completion ring */
1223 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1224 cqe_type = cqe->fast_path_regular.type;
1225
1226 /* Process an unlikely slowpath event */
1227 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1228 struct eth_slow_path_rx_cqe *sp_cqe;
1229
1230 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1231 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1232 return 0;
1233 }
1234
1235 /* Handle TPA cqes */
1236 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1237 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1238
1239 /* Get the data from the SW ring; Consume it only after it's evident
1240 * we wouldn't recycle it.
1241 */
1242 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1243 bd = &rxq->sw_rx_ring[bd_cons_idx];
1244
1245 fp_cqe = &cqe->fast_path_regular;
1246 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1247	pad = fp_cqe->placement_offset + rxq->rx_headroom;
1248
1249 /* Run eBPF program if one is attached */
1250 if (xdp_prog)
1251		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
1252				 &pad, &len))
1253			return 0;
1254
1255 /* If this is an error packet then drop it */
1256 flags = cqe->fast_path_regular.pars_flags.flags;
1257 parse_flag = le16_to_cpu(flags);
1258
1259 csum_flag = qede_check_csum(parse_flag);
1260 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1261		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1262			rxq->rx_ip_frags++;
1263		else
1264			rxq->rx_hw_errors++;
1265	}
1266
1267 /* Basic validation passed; Need to prepare an SKB. This would also
1268 * guarantee to finally consume the first BD upon success.
1269 */
1270	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
1271	if (!skb) {
1272 rxq->rx_alloc_errors++;
1273 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1274 return 0;
1275 }
1276
1277 /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed
1278 * by a single cqe.
1279 */
1280 if (fp_cqe->bd_num > 1) {
1281 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1282 fp_cqe, len);
1283
1284 if (unlikely(unmapped_frags > 0)) {
1285 qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1286 dev_kfree_skb_any(skb);
1287 return 0;
1288 }
1289 }
1290
1291 /* The SKB contains all the data. Now prepare meta-magic */
1292 skb->protocol = eth_type_trans(skb, edev->ndev);
1293 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1294 qede_set_skb_csum(skb, csum_flag);
1295 skb_record_rx_queue(skb, rxq->rxq_id);
1296	qede_ptp_record_rx_ts(edev, cqe, skb);
1297
1298 /* SKB is prepared - pass it to stack */
1299 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
1300
1301 return 1;
1302}
1303
1304static int qede_rx_int(struct qede_fastpath *fp, int budget)
1305{
1306 struct qede_rx_queue *rxq = fp->rxq;
1307 struct qede_dev *edev = fp->edev;
1308	int work_done = 0, rcv_pkts = 0;
1309	u16 hw_comp_cons, sw_comp_cons;
1310
1311 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1312 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1313
1314 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
1315 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
1316 * read before it is written by FW, then FW writes CQE and SB, and then
1317 * the CPU reads the hw_comp_cons, it will use an old CQE.
1318 */
1319 rmb();
1320
1321 /* Loop to complete all indicated BDs */
1322 while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1323		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1324		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1325 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1326 work_done++;
1327 }
1328
1329	rxq->rcv_pkts += rcv_pkts;
1330
1331	/* Allocate replacement buffers */
1332 while (rxq->num_rx_buffers - rxq->filled_buffers)
1333 if (qede_alloc_rx_buffer(rxq, false))
1334 break;
1335
1336	/* Update producers */
1337 qede_update_rx_prod(edev, rxq);
1338
1339 return work_done;
1340}
1341
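/* Check, after refreshing the status-block copy, whether any ring of this
 * fastpath still has pending work before NAPI is completed.
 */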
1342static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1343{
1344 qed_sb_update_sb_idx(fp->sb_info);
1345
1346 /* *_has_*_work() reads the status block, thus we need to ensure that
1347 * status block indices have been actually read (qed_sb_update_sb_idx)
1348 * prior to this check (*_has_*_work) so that we won't write the
1349 * "newer" value of the status block to HW (if there was a DMA right
1350 * after qede_has_rx_work and if there is no rmb, the memory reading
1351 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
1352 * In this case there will never be another interrupt until there is
1353 * another update of the status block, while there is still unhandled
1354 * work.
1355 */
1356 rmb();
1357
1358 if (likely(fp->type & QEDE_FASTPATH_RX))
1359 if (qede_has_rx_work(fp->rxq))
1360 return true;
1361
1362 if (fp->type & QEDE_FASTPATH_XDP)
1363 if (qede_txq_has_work(fp->xdp_tx))
1364 return true;
1365
1366 if (likely(fp->type & QEDE_FASTPATH_TX))
1367 if (qede_txq_has_work(fp->txq))
1368 return true;
1369
1370 return false;
1371}
1372
1373/*********************
1374 * NDO & API related *
1375 *********************/
1376int qede_poll(struct napi_struct *napi, int budget)
1377{
1378 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1379 napi);
1380 struct qede_dev *edev = fp->edev;
1381 int rx_work_done = 0;
1382
1383 if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
1384 qede_tx_int(edev, fp->txq);
1385
1386 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1387 qede_xdp_tx_int(edev, fp->xdp_tx);
1388
1389 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1390 qede_has_rx_work(fp->rxq)) ?
1391 qede_rx_int(fp, budget) : 0;
1392 if (rx_work_done < budget) {
1393 if (!qede_poll_is_more_work(fp)) {
1394			napi_complete_done(napi, rx_work_done);
1395
1396 /* Update and reenable interrupts */
1397 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1398 } else {
1399 rx_work_done = budget;
1400 }
1401 }
1402
1403 if (fp->xdp_xmit) {
1404 u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1405
1406 fp->xdp_xmit = 0;
1407 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1408 qede_update_tx_producer(fp->xdp_tx);
1409 }
1410
1411 return rx_work_done;
1412}
1413
1414irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1415{
1416 struct qede_fastpath *fp = fp_cookie;
1417
1418 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1419
1420 napi_schedule_irqoff(&fp->napi);
1421 return IRQ_HANDLED;
1422}
1423
1424/* Main transmit function */
1425netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1426{
1427 struct qede_dev *edev = netdev_priv(ndev);
1428 struct netdev_queue *netdev_txq;
1429 struct qede_tx_queue *txq;
1430 struct eth_tx_1st_bd *first_bd;
1431 struct eth_tx_2nd_bd *second_bd = NULL;
1432 struct eth_tx_3rd_bd *third_bd = NULL;
1433 struct eth_tx_bd *tx_data_bd = NULL;
1434	u16 txq_index, val = 0;
1435	u8 nbd = 0;
1436 dma_addr_t mapping;
1437 int rc, frag_idx = 0, ipv6_ext = 0;
1438 u8 xmit_type;
1439 u16 idx;
1440 u16 hlen;
1441 bool data_split = false;
1442
1443 /* Get tx-queue context and netdev index */
1444 txq_index = skb_get_queue_mapping(skb);
1445 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
1446 txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
1447 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1448
1449 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1450
1451 xmit_type = qede_xmit_type(skb, &ipv6_ext);
1452
1453#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1454 if (qede_pkt_req_lin(skb, xmit_type)) {
1455 if (skb_linearize(skb)) {
1456 DP_NOTICE(edev,
1457 "SKB linearization failed - silently dropping this SKB\n");
1458 dev_kfree_skb_any(skb);
1459 return NETDEV_TX_OK;
1460 }
1461 }
1462#endif
1463
1464 /* Fill the entry in the SW ring and the BDs in the FW ring */
1465	idx = txq->sw_tx_prod;
1466	txq->sw_tx_ring.skbs[idx].skb = skb;
1467 first_bd = (struct eth_tx_1st_bd *)
1468 qed_chain_produce(&txq->tx_pbl);
1469 memset(first_bd, 0, sizeof(*first_bd));
1470 first_bd->data.bd_flags.bitfields =
1471 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1472
1473	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1474		qede_ptp_tx_ts(edev, skb);
1475
1476	/* Map skb linear data for DMA and set in the first BD */
1477 mapping = dma_map_single(txq->dev, skb->data,
1478 skb_headlen(skb), DMA_TO_DEVICE);
1479 if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1480 DP_NOTICE(edev, "SKB mapping failed\n");
1481 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1482 qede_update_tx_producer(txq);
1483 return NETDEV_TX_OK;
1484 }
1485 nbd++;
1486 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1487
1488 /* In case there is IPv6 with extension headers or LSO we need 2nd and
1489 * 3rd BDs.
1490 */
1491 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1492 second_bd = (struct eth_tx_2nd_bd *)
1493 qed_chain_produce(&txq->tx_pbl);
1494 memset(second_bd, 0, sizeof(*second_bd));
1495
1496 nbd++;
1497 third_bd = (struct eth_tx_3rd_bd *)
1498 qed_chain_produce(&txq->tx_pbl);
1499 memset(third_bd, 0, sizeof(*third_bd));
1500
1501 nbd++;
1502 /* We need to fill in additional data in second_bd... */
1503 tx_data_bd = (struct eth_tx_bd *)second_bd;
1504 }
1505
1506 if (skb_vlan_tag_present(skb)) {
1507 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1508 first_bd->data.bd_flags.bitfields |=
1509 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1510 }
1511
1512 /* Fill the parsing flags & params according to the requested offload */
1513 if (xmit_type & XMIT_L4_CSUM) {
1514 /* We don't re-calculate IP checksum as it is already done by
1515 * the upper stack
1516 */
1517 first_bd->data.bd_flags.bitfields |=
1518 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1519
1520 if (xmit_type & XMIT_ENC) {
1521 first_bd->data.bd_flags.bitfields |=
1522 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1523
1524			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1525		}
1526
1527 /* Legacy FW had flipped behavior in regard to this bit -
1528 * I.e., needed to set to prevent FW from touching encapsulated
1529 * packets when it didn't need to.
1530 */
1531 if (unlikely(txq->is_legacy))
1532			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1533
1534 /* If the packet is IPv6 with extension header, indicate that
1535 * to FW and pass few params, since the device cracker doesn't
1536 * support parsing IPv6 with extension header/s.
1537 */
1538 if (unlikely(ipv6_ext))
1539 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1540 }
1541
1542 if (xmit_type & XMIT_LSO) {
1543 first_bd->data.bd_flags.bitfields |=
1544 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1545 third_bd->data.lso_mss =
1546 cpu_to_le16(skb_shinfo(skb)->gso_size);
1547
1548 if (unlikely(xmit_type & XMIT_ENC)) {
1549 first_bd->data.bd_flags.bitfields |=
1550 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1551
1552 if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1553 u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1554
1555 first_bd->data.bd_flags.bitfields |= 1 << tmp;
1556 }
1557 hlen = qede_get_skb_hlen(skb, true);
1558 } else {
1559 first_bd->data.bd_flags.bitfields |=
1560 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1561 hlen = qede_get_skb_hlen(skb, false);
1562 }
1563
1564 /* @@@TBD - if will not be removed need to check */
1565 third_bd->data.bitfields |=
1566 cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1567
1568 /* Make life easier for FW guys who can't deal with header and
1569 * data on same BD. If we need to split, use the second bd...
1570 */
1571 if (unlikely(skb_headlen(skb) > hlen)) {
1572 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1573 "TSO split header size is %d (%x:%x)\n",
1574 first_bd->nbytes, first_bd->addr.hi,
1575 first_bd->addr.lo);
1576
1577 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1578 le32_to_cpu(first_bd->addr.lo)) +
1579 hlen;
1580
1581 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1582 le16_to_cpu(first_bd->nbytes) -
1583 hlen);
1584
1585 /* this marks the BD as one that has no
1586 * individual mapping
1587 */
1588 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1589
1590 first_bd->nbytes = cpu_to_le16(hlen);
1591
1592 tx_data_bd = (struct eth_tx_bd *)third_bd;
1593 data_split = true;
1594 }
1595 } else {
1596		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
1597			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
1598	}
1599
1600	first_bd->data.bitfields = cpu_to_le16(val);
1601
1602	/* Handle fragmented skb */
1603 /* special handle for frags inside 2nd and 3rd bds.. */
1604 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1605 rc = map_frag_to_bd(txq,
1606 &skb_shinfo(skb)->frags[frag_idx],
1607 tx_data_bd);
1608 if (rc) {
1609 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1610 qede_update_tx_producer(txq);
1611 return NETDEV_TX_OK;
1612 }
1613
1614 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1615 tx_data_bd = (struct eth_tx_bd *)third_bd;
1616 else
1617 tx_data_bd = NULL;
1618
1619 frag_idx++;
1620 }
1621
1622 /* map last frags into 4th, 5th .... */
1623 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1624 tx_data_bd = (struct eth_tx_bd *)
1625 qed_chain_produce(&txq->tx_pbl);
1626
1627 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1628
1629 rc = map_frag_to_bd(txq,
1630 &skb_shinfo(skb)->frags[frag_idx],
1631 tx_data_bd);
1632 if (rc) {
1633 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1634 qede_update_tx_producer(txq);
1635 return NETDEV_TX_OK;
1636 }
1637 }
1638
1639 /* update the first BD with the actual num BDs */
1640 first_bd->data.nbds = nbd;
1641
1642 netdev_tx_sent_queue(netdev_txq, skb->len);
1643
1644 skb_tx_timestamp(skb);
1645
1646 /* Advance packet producer only before sending the packet since mapping
1647 * of pages may fail.
1648 */
1649	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1650
1651 /* 'next page' entries are counted in the producer value */
1652 txq->tx_db.data.bd_prod =
1653 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1654
1655 if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
1656 qede_update_tx_producer(txq);
1657
1658 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1659 < (MAX_SKB_FRAGS + 1))) {
1660 if (skb->xmit_more)
1661 qede_update_tx_producer(txq);
1662
1663 netif_tx_stop_queue(netdev_txq);
1664 txq->stopped_cnt++;
1665 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1666 "Stop queue was called\n");
1667 /* paired memory barrier is in qede_tx_int(), we have to keep
1668 * ordering of set_bit() in netif_tx_stop_queue() and read of
1669 * fp->bd_tx_cons
1670 */
1671 smp_mb();
1672
1673 if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1674 (MAX_SKB_FRAGS + 1)) &&
1675 (edev->state == QEDE_STATE_OPEN)) {
1676 netif_tx_wake_queue(netdev_txq);
1677 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1678 "Wake queue was called\n");
1679 }
1680 }
1681
1682 return NETDEV_TX_OK;
1683}
1684
1685/* 8B udp header + 8B base tunnel header + 32B option length */
1686#define QEDE_MAX_TUN_HDR_LEN 48
1687
1688netdev_features_t qede_features_check(struct sk_buff *skb,
1689 struct net_device *dev,
1690 netdev_features_t features)
1691{
1692 if (skb->encapsulation) {
1693 u8 l4_proto = 0;
1694
1695 switch (vlan_get_protocol(skb)) {
1696 case htons(ETH_P_IP):
1697 l4_proto = ip_hdr(skb)->protocol;
1698 break;
1699 case htons(ETH_P_IPV6):
1700 l4_proto = ipv6_hdr(skb)->nexthdr;
1701 break;
1702 default:
1703 return features;
1704 }
1705
1706 /* Disable offloads for geneve tunnels, as HW can't parse
1707		 * the geneve header which has option length greater than 32b
1708		 * and disable offloads for the ports which are not offloaded.
1709		 */
1710		if (l4_proto == IPPROTO_UDP) {
1711 struct qede_dev *edev = netdev_priv(dev);
1712 u16 hdrlen, vxln_port, gnv_port;
1713
1714 hdrlen = QEDE_MAX_TUN_HDR_LEN;
1715 vxln_port = edev->vxlan_dst_port;
1716 gnv_port = edev->geneve_dst_port;
1717
1718 if ((skb_inner_mac_header(skb) -
1719 skb_transport_header(skb)) > hdrlen ||
1720 (ntohs(udp_hdr(skb)->dest) != vxln_port &&
1721 ntohs(udp_hdr(skb)->dest) != gnv_port))
1722 return features & ~(NETIF_F_CSUM_MASK |
1723 NETIF_F_GSO_MASK);
1724 }
1725	}
1726
1727 return features;
1728}