/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

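/* Tx completion callback for buffers posted through the qed_dev-level LL2
 * interface: release the DMA mapping of the first fragment, let the
 * protocol driver's tx_cb [if registered] see the skb, then free it.
 */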
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

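/* Allocate and DMA-map a single LL2 Rx buffer of cdev->ll2->rx_size bytes;
 * the mapping starts past NET_SKB_PAD headroom so that build_skb() can be
 * used on completion.
 */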
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

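/* Rx completion handler for the qed_dev-level LL2 connection: allocate a
 * replacement buffer, build an skb around the completed one and hand it to
 * the registered rx_cb, then repost a buffer to the FW. Packets shorter
 * than ETH_HLEN simply recycle the original buffer.
 */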
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					struct qed_ll2_comp_rx_data *data)
{
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

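/* Translate a connection handle into its qed_ll2_info entry; optionally
 * take the connection mutex and/or insist that the connection is active.
 */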
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

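/* Empty the Tx queue of an inactive connection, returning flushed OOO
 * buffers to the free list and completing all other packets to their
 * owners as if they had been sent.
 */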
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->conn.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->
							       my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}

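/* Tx SB completion handler - walk the BDs the FW has consumed and complete
 * the corresponding packets to their owners outside the Tx lock.
 */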
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->conn.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

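/* Handle a single GSI-offload Rx completion; the Rx lock is dropped around
 * the upper-layer callback.
 */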
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->length.packet_length =
		le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->conn.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, &data);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

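/* Rx SB completion handler - drain the Rx completion queue and dispatch
 * each CQE according to its type.
 */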
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

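/* Loopback Rx handler for the iSCSI OOO connection: classify each CQE's
 * out-of-order event and move the attached buffer between the driver's
 * "isles" accordingly.
 */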
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n"
				  );
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

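/* Re-transmit buffers whose reordering has completed; a buffer that cannot
 * be prepared for Tx is returned to the ready list and submission stops.
 */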
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->conn.tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

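/* Tx completion handler of the OOO loopback queue: recycle each single-BD
 * buffer back to the Rx side [or to the free list if reposting fails].
 */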
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

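/* Pre-allocate the DMA-coherent buffer pool used by the iSCSI OOO
 * connection; each buffer covers the MTU plus header overhead, rounded up
 * to the cache-line size.
 */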
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

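/* Acquire and establish the dedicated loopback LL2 connection over which
 * iSCSI out-of-order segments are reordered.
 */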
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_conn ll2_info = { 0 };
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = OOO_LB_TC;
	ll2_info.tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
				    { return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */

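/* Send the CORE_RAMROD_RX_QUEUE_START ramrod describing the Rx BD chain
 * and CQE PBL to the FW.
 */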
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

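/* Send the CORE_RAMROD_TX_QUEUE_START ramrod; the PQ and FW protocol are
 * derived from the connection's traffic class and type.
 */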
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->conn.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

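/* Allocate the Rx BD chain, its shadow descriptor array and the Rx
 * completion queue for a new connection.
 */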
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}

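/* Reserve a free LL2 connection slot, allocate its Rx/Tx resources and
 * register the matching SB completion callbacks. On success the slot index
 * is returned through p_connection_handle.
 */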
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn = *p_params;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->conn.ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

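/* Activate an acquired connection: reset the queues, acquire a CID, send
 * the Rx/Tx start ramrods and apply per-protocol fixups [light-L2 parsing,
 * OOO buffer submission, FCoE ethertype filters].
 */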
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

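/* Move posted buffers to the active list and, if anything changed, update
 * the FW Rx producers with a single register write.
 */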
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

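/* Post a single Rx buffer on a connection; with notify_fw clear, the
 * buffer is only enqueued locally and the producer update is deferred.
 */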
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

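/* Fill the chain BDs for a prepared packet: the first BD carries vlan,
 * flags and the first fragment, while BDs for any remaining fragments are
 * produced empty here and filled in once those fragments are provided.
 */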
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001633static void
1634qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1635 struct qed_ll2_info *p_ll2,
1636 struct qed_ll2_tx_packet *p_curp,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001637 struct qed_ll2_tx_pkt_info *pkt)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001638{
1639 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1640 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1641 struct core_tx_bd *start_bd = NULL;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001642 enum core_roce_flavor_type roce_flavor;
1643 enum core_tx_dest tx_dest;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001644 u16 bd_data = 0, frag_idx;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001645
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001646 roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
1647 : CORE_RROCE;
1648
1649 tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
1650 : CORE_TX_DEST_LB;
1651
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001652 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001653 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001654 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001655 cpu_to_le16(pkt->l4_hdr_offset_w));
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001656 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001657 bd_data |= pkt->bd_flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001658 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001659 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001660 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1661 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001662 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
1663 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001664
1665 DP_VERBOSE(p_hwfn,
1666 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1667		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a buffer of %04x bytes and %02x BDs at %08x:%08x\n",
1668 p_ll2->queue_id,
1669 p_ll2->cid,
Arnd Bergmann0629a332017-01-18 15:52:52 +01001670 p_ll2->conn.conn_type,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001671 prod_idx,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001672 pkt->first_frag_len,
1673 pkt->num_of_bds,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001674 le32_to_cpu(start_bd->addr.hi),
1675 le32_to_cpu(start_bd->addr.lo));
1676
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001677 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001678 return;
1679
1680 /* Need to provide the packet with additional BDs for frags */
1681 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001682 frag_idx < pkt->num_of_bds; frag_idx++) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001683 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1684
1685 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001686 (*p_bd)->bd_data.as_bitfield = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001687 (*p_bd)->bitfield1 = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001688 p_curp->bds_set[frag_idx].tx_frag = 0;
1689 p_curp->bds_set[frag_idx].frag_len = 0;
1690 }
1691}
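/* Editor's note (illustrative): bd_data above is composed with SET_FIELD()
 * on a local u16 and stored with a single cpu_to_le16(); e.g. a plain
 * two-BD packet with no caller flags would resolve to:
 *
 *	u16 bd_data = 0;
 *
 *	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
 *	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 2);
 *	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
 */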
1692
1693/* This should be called while the Txq spinlock is held */
1694static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1695 struct qed_ll2_info *p_ll2_conn)
1696{
1697 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1698 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1699 struct qed_ll2_tx_packet *p_pkt = NULL;
1700 struct core_db_data db_msg = { 0, 0, 0 };
1701 u16 bd_prod;
1702
1703 /* If there are missing BDs, don't do anything now */
1704 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1705 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1706 return;
1707
1708	/* Push the current packet to the list and clean up after it */
1709 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1710 &p_ll2_conn->tx_queue.sending_descq);
1711 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1712 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1713
1714 /* Notify FW of packet only if requested to */
1715 if (!b_notify)
1716 return;
1717
1718 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1719
1720 while (!list_empty(&p_tx->sending_descq)) {
1721 p_pkt = list_first_entry(&p_tx->sending_descq,
1722 struct qed_ll2_tx_packet, list_entry);
1723 if (!p_pkt)
1724 break;
1725
Wei Yongjunb4f0fd42016-10-17 15:17:51 +00001726 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001727 }
1728
1729 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1730 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1731 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1732 DQ_XCM_CORE_TX_BD_PROD_CMD);
1733 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1734 db_msg.spq_prod = cpu_to_le16(bd_prod);
1735
1736	/* Make sure the BD data is updated before ringing the doorbell */
1737 wmb();
1738
1739 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1740
1741 DP_VERBOSE(p_hwfn,
1742 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1743 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1744 p_ll2_conn->queue_id,
Arnd Bergmann0629a332017-01-18 15:52:52 +01001745 p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001746}
1747
1748int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1749 u8 connection_handle,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001750 struct qed_ll2_tx_pkt_info *pkt,
1751 bool notify_fw)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001752{
1753 struct qed_ll2_tx_packet *p_curp = NULL;
1754 struct qed_ll2_info *p_ll2_conn = NULL;
1755 struct qed_ll2_tx_queue *p_tx;
1756 struct qed_chain *p_tx_chain;
1757 unsigned long flags;
1758 int rc = 0;
1759
1760 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1761 if (!p_ll2_conn)
1762 return -EINVAL;
1763 p_tx = &p_ll2_conn->tx_queue;
1764 p_tx_chain = &p_tx->txq_chain;
1765
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001766 if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001767 return -EIO;
1768
1769 spin_lock_irqsave(&p_tx->lock, flags);
1770 if (p_tx->cur_send_packet) {
1771 rc = -EEXIST;
1772 goto out;
1773 }
1774
1775	/* Get an entry, but only if we have Tx elements for it */
1776 if (!list_empty(&p_tx->free_descq))
1777 p_curp = list_first_entry(&p_tx->free_descq,
1778 struct qed_ll2_tx_packet, list_entry);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001779 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001780 p_curp = NULL;
1781
1782 if (!p_curp) {
1783 rc = -EBUSY;
1784 goto out;
1785 }
1786
1787 /* Prepare packet and BD, and perhaps send a doorbell to FW */
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001788 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
1789
1790 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001791
1792 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1793
1794out:
1795 spin_unlock_irqrestore(&p_tx->lock, flags);
1796 return rc;
1797}
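/* Illustrative usage sketch (editor's addition): a single-BD send via the
 * API above. `handle` comes from qed_ll2_acquire_connection() and
 * `dma`/`len` describe an already-mapped buffer; both are assumed caller
 * state. With num_of_bds == 1 the packet is complete, so the doorbell is
 * rung from within this call and `my_cookie` is echoed on Tx completion.
 *
 *	struct qed_ll2_tx_pkt_info pkt;
 *
 *	memset(&pkt, 0, sizeof(pkt));
 *	pkt.num_of_bds = 1;
 *	pkt.tx_dest = QED_LL2_TX_DEST_NW;
 *	pkt.first_frag = dma;
 *	pkt.first_frag_len = len;
 *	pkt.cookie = my_cookie;
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
 */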
1798
1799int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1800 u8 connection_handle,
1801 dma_addr_t addr, u16 nbytes)
1802{
1803 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1804 struct qed_ll2_info *p_ll2_conn = NULL;
1805 u16 cur_send_frag_num = 0;
1806 struct core_tx_bd *p_bd;
1807 unsigned long flags;
1808
1809 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1810 if (!p_ll2_conn)
1811 return -EINVAL;
1812
1813 if (!p_ll2_conn->tx_queue.cur_send_packet)
1814 return -EINVAL;
1815
1816 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1817 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1818
1819 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1820 return -EINVAL;
1821
1822 /* Fill the BD information, and possibly notify FW */
1823 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1824 DMA_REGPAIR_LE(p_bd->addr, addr);
1825 p_bd->nbytes = cpu_to_le16(nbytes);
1826 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1827 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1828
1829 p_ll2_conn->tx_queue.cur_send_frag_num++;
1830
1831 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1832 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1833 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1834
1835 return 0;
1836}
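/* Illustrative multi-fragment sketch (editor's addition): for a
 * scatter-gather packet, qed_ll2_prepare_tx_packet() is called with
 * num_of_bds = 1 + nr_frags and only the head fragment; each remaining
 * fragment is then attached with the helper above. The doorbell fires once
 * cur_send_frag_num reaches bd_used (see qed_ll2_tx_packet_notify()).
 * `frag_dma[]` and `frag_len[]` are assumed caller-side arrays.
 *
 *	for (i = 0; i < nr_frags; i++) {
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frag_dma[i],
 *						       frag_len[i]);
 *		if (rc)
 *			break;
 *	}
 */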
1837
1838int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1839{
1840 struct qed_ll2_info *p_ll2_conn = NULL;
1841 int rc = -EINVAL;
Rahul Verma15582962017-04-06 15:58:29 +03001842 struct qed_ptt *p_ptt;
1843
1844 p_ptt = qed_ptt_acquire(p_hwfn);
1845 if (!p_ptt)
1846 return -EAGAIN;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001847
1848 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
Rahul Verma15582962017-04-06 15:58:29 +03001849 if (!p_ll2_conn) {
1850 rc = -EINVAL;
1851 goto out;
1852 }
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001853
1854 /* Stop Tx & Rx of connection, if needed */
1855 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1856 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1857 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001858 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001859 qed_ll2_txq_flush(p_hwfn, connection_handle);
1860 }
1861
1862 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1863 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1864 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001865 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001866 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1867 }
1868
Arnd Bergmann0629a332017-01-18 15:52:52 +01001869 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001870 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1871
Arun Easi1e128c82017-02-15 06:28:22 -08001872 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
Rahul Verma15582962017-04-06 15:58:29 +03001873 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001874 0x8906, 0,
1875 QED_LLH_FILTER_ETHERTYPE);
Rahul Verma15582962017-04-06 15:58:29 +03001876 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001877 0x8914, 0,
1878 QED_LLH_FILTER_ETHERTYPE);
1879 }
1880
Rahul Verma15582962017-04-06 15:58:29 +03001881out:
1882 qed_ptt_release(p_hwfn, p_ptt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001883 return rc;
1884}
1885
1886void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1887{
1888 struct qed_ll2_info *p_ll2_conn = NULL;
1889
1890 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1891 if (!p_ll2_conn)
1892 return;
1893
1894 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1895 p_ll2_conn->rx_queue.b_cb_registred = false;
1896 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1897 }
1898
1899 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1900 p_ll2_conn->tx_queue.b_cb_registred = false;
1901 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1902 }
1903
1904 kfree(p_ll2_conn->tx_queue.descq_array);
1905 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1906
1907 kfree(p_ll2_conn->rx_queue.descq_array);
1908 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1909 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1910
1911 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1912
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001913 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1914
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001915 mutex_lock(&p_ll2_conn->mutex);
1916 p_ll2_conn->b_active = false;
1917 mutex_unlock(&p_ll2_conn->mutex);
1918}
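/* Editor's note (illustrative): shutdown is a two-step sequence, exactly as
 * qed_ll2_stop() does further below - terminate first (ramrods that stop
 * the FW queues, plus Tx/Rx flush), release second (unregister the ISR
 * callbacks, free the chains and the CID):
 *
 *	if (qed_ll2_terminate_connection(p_hwfn, handle))
 *		DP_NOTICE(p_hwfn, "Failed to terminate LL2 connection\n");
 *	qed_ll2_release_connection(p_hwfn, handle);
 */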
1919
Tomer Tayar3587cb82017-05-21 12:10:56 +03001920int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001921{
1922 struct qed_ll2_info *p_ll2_connections;
1923 u8 i;
1924
1925 /* Allocate LL2's set struct */
1926 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1927 sizeof(struct qed_ll2_info), GFP_KERNEL);
1928 if (!p_ll2_connections) {
1929 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
Tomer Tayar3587cb82017-05-21 12:10:56 +03001930 return -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001931 }
1932
1933 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1934 p_ll2_connections[i].my_id = i;
1935
Tomer Tayar3587cb82017-05-21 12:10:56 +03001936 p_hwfn->p_ll2_info = p_ll2_connections;
1937 return 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001938}
1939
Tomer Tayar3587cb82017-05-21 12:10:56 +03001940void qed_ll2_setup(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001941{
1942 int i;
1943
1944 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
Tomer Tayar3587cb82017-05-21 12:10:56 +03001945 mutex_init(&p_hwfn->p_ll2_info[i].mutex);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001946}
1947
Tomer Tayar3587cb82017-05-21 12:10:56 +03001948void qed_ll2_free(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001949{
Tomer Tayar3587cb82017-05-21 12:10:56 +03001950 if (!p_hwfn->p_ll2_info)
1951 return;
1952
1953 kfree(p_hwfn->p_ll2_info);
1954 p_hwfn->p_ll2_info = NULL;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001955}
1956
1957static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1958 struct qed_ptt *p_ptt,
1959 struct qed_ll2_info *p_ll2_conn,
1960 struct qed_ll2_stats *p_stats)
1961{
1962 struct core_ll2_tstorm_per_queue_stat tstats;
1963 u8 qid = p_ll2_conn->queue_id;
1964 u32 tstats_addr;
1965
1966 memset(&tstats, 0, sizeof(tstats));
1967 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1968 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1969 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1970
1971 p_stats->packet_too_big_discard =
1972 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1973 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1974}
1975
1976static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1977 struct qed_ptt *p_ptt,
1978 struct qed_ll2_info *p_ll2_conn,
1979 struct qed_ll2_stats *p_stats)
1980{
1981 struct core_ll2_ustorm_per_queue_stat ustats;
1982 u8 qid = p_ll2_conn->queue_id;
1983 u32 ustats_addr;
1984
1985 memset(&ustats, 0, sizeof(ustats));
1986 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1987 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1988 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1989
1990 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1991 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1992 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1993 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1994 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1995 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1996}
1997
1998static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1999 struct qed_ptt *p_ptt,
2000 struct qed_ll2_info *p_ll2_conn,
2001 struct qed_ll2_stats *p_stats)
2002{
2003 struct core_ll2_pstorm_per_queue_stat pstats;
2004 u8 stats_id = p_ll2_conn->tx_stats_id;
2005 u32 pstats_addr;
2006
2007 memset(&pstats, 0, sizeof(pstats));
2008 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
2009 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
2010 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
2011
2012 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
2013 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
2014 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
2015 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
2016 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
2017 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
2018}
2019
2020int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
2021 u8 connection_handle, struct qed_ll2_stats *p_stats)
2022{
2023 struct qed_ll2_info *p_ll2_conn = NULL;
2024 struct qed_ptt *p_ptt;
2025
2026 memset(p_stats, 0, sizeof(*p_stats));
2027
2028 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
2029 !p_hwfn->p_ll2_info)
2030 return -EINVAL;
2031
2032 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2033
2034 p_ptt = qed_ptt_acquire(p_hwfn);
2035 if (!p_ptt) {
2036 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2037 return -EINVAL;
2038 }
2039
2040 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2041 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2042 if (p_ll2_conn->tx_stats_en)
2043 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2044
2045 qed_ptt_release(p_hwfn, p_ptt);
2046 return 0;
2047}
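/* Illustrative usage sketch (editor's addition): snapshotting a live
 * connection's counters. The struct is zeroed on entry, so the Pstorm
 * fields simply stay 0 when Tx stats are disabled for the connection.
 *
 *	struct qed_ll2_stats stats;
 *
 *	if (!qed_ll2_get_stats(p_hwfn, handle, &stats))
 *		DP_INFO(p_hwfn, "LL2 Rx ucast pkts %llu\n",
 *			(unsigned long long)stats.rcv_ucast_pkts);
 */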
2048
2049static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2050 const struct qed_ll2_cb_ops *ops,
2051 void *cookie)
2052{
2053 cdev->ll2->cbs = ops;
2054 cdev->ll2->cb_cookie = cookie;
2055}
2056
2057static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2058{
Arnd Bergmann0629a332017-01-18 15:52:52 +01002059 struct qed_ll2_conn ll2_info;
Wei Yongjun88a24282016-10-10 14:08:28 +00002060 struct qed_ll2_buffer *buffer, *tmp_buffer;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002061 enum qed_ll2_conn_type conn_type;
2062 struct qed_ptt *p_ptt;
2063 int rc, i;
Yuval Mintzfc831822016-12-01 00:21:06 -08002064 u8 gsi_enable = 1;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002065
2066 /* Initialize LL2 locks & lists */
2067 INIT_LIST_HEAD(&cdev->ll2->list);
2068 spin_lock_init(&cdev->ll2->lock);
2069 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2070 L1_CACHE_BYTES + params->mtu;
2071 cdev->ll2->frags_mapped = params->frags_mapped;
2072
2073	/* Allocate memory for LL2 */
2074 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2075 cdev->ll2->rx_size);
2076 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2077 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2078 if (!buffer) {
2079 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2080 goto fail;
2081 }
2082
2083 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2084 &buffer->phys_addr);
2085 if (rc) {
2086 kfree(buffer);
2087 goto fail;
2088 }
2089
2090 list_add_tail(&buffer->list, &cdev->ll2->list);
2091 }
2092
2093 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002094 case QED_PCI_FCOE:
2095 conn_type = QED_LL2_TYPE_FCOE;
2096 gsi_enable = 0;
2097 break;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002098 case QED_PCI_ISCSI:
2099 conn_type = QED_LL2_TYPE_ISCSI;
Yuval Mintzfc831822016-12-01 00:21:06 -08002100 gsi_enable = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002101 break;
2102 case QED_PCI_ETH_ROCE:
2103 conn_type = QED_LL2_TYPE_ROCE;
2104 break;
2105 default:
2106 conn_type = QED_LL2_TYPE_TEST;
2107 }
2108
2109	/* Prepare the temporary LL2 information */
2110 memset(&ll2_info, 0, sizeof(ll2_info));
Arnd Bergmann0629a332017-01-18 15:52:52 +01002111
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002112 ll2_info.conn_type = conn_type;
2113 ll2_info.mtu = params->mtu;
2114 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2115 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
2116 ll2_info.tx_tc = 0;
2117 ll2_info.tx_dest = CORE_TX_DEST_NW;
Yuval Mintzfc831822016-12-01 00:21:06 -08002118 ll2_info.gsi_enable = gsi_enable;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002119
2120 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
2121 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
2122 &cdev->ll2->handle);
2123 if (rc) {
2124 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2125 goto fail;
2126 }
2127
2128 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2129 cdev->ll2->handle);
2130 if (rc) {
2131 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2132 goto release_fail;
2133 }
2134
2135 /* Post all Rx buffers to FW */
2136 spin_lock_bh(&cdev->ll2->lock);
Wei Yongjun88a24282016-10-10 14:08:28 +00002137 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002138 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2139 cdev->ll2->handle,
2140 buffer->phys_addr, 0, buffer, 1);
2141 if (rc) {
2142 DP_INFO(cdev,
2143				"Failed to post an Rx buffer; deleting it\n");
2144 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2145 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2146 kfree(buffer->data);
2147 list_del(&buffer->list);
2148 kfree(buffer);
2149 } else {
2150 cdev->ll2->rx_cnt++;
2151 }
2152 }
2153 spin_unlock_bh(&cdev->ll2->lock);
2154
2155 if (!cdev->ll2->rx_cnt) {
2156 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2157 goto release_terminate;
2158 }
2159
2160 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2161 DP_INFO(cdev, "Invalid Ethernet address\n");
2162 goto release_terminate;
2163 }
2164
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002165 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2166 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2167 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2168 rc = qed_ll2_start_ooo(cdev, params);
2169 if (rc) {
2170 DP_INFO(cdev,
2171 "Failed to initialize the OOO LL2 queue\n");
2172 goto release_terminate;
2173 }
2174 }
2175
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002176 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2177 if (!p_ptt) {
2178 DP_INFO(cdev, "Failed to acquire PTT\n");
2179 goto release_terminate;
2180 }
2181
2182 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2183 params->ll2_mac_address);
2184 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2185 if (rc) {
2186 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2187 goto release_terminate_all;
2188 }
2189
2190 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002191 return 0;
2192
2193release_terminate_all:
2194
2195release_terminate:
2196 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2197release_fail:
2198 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2199fail:
2200 qed_ll2_kill_buffers(cdev);
2201 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2202 return -EINVAL;
2203}
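/* Illustrative caller-side sketch (editor's addition): the parameters a
 * protocol driver would hand to the .start() op above. Field names match
 * the struct qed_ll2_params usage in this function; `my_mac` is an assumed
 * caller-owned MAC address.
 *
 *	struct qed_ll2_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.mtu = 1500;
 *	params.drop_ttl0_packets = true;
 *	params.rx_vlan_stripping = true;
 *	params.frags_mapped = false;
 *	ether_addr_copy(params.ll2_mac_address, my_mac);
 */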
2204
2205static int qed_ll2_stop(struct qed_dev *cdev)
2206{
2207 struct qed_ptt *p_ptt;
2208 int rc;
2209
2210 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2211 return 0;
2212
2213 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2214 if (!p_ptt) {
2215 DP_INFO(cdev, "Failed to acquire PTT\n");
2216 goto fail;
2217 }
2218
2219 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2220 cdev->ll2_mac_address);
2221 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2222 eth_zero_addr(cdev->ll2_mac_address);
2223
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002224 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2225 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2226 qed_ll2_stop_ooo(cdev);
2227
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002228 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2229 cdev->ll2->handle);
2230 if (rc)
2231 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2232
2233 qed_ll2_kill_buffers(cdev);
2234
2235 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2236 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2237
2238 return rc;
2239fail:
2240 return -EINVAL;
2241}
2242
2243static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2244{
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002245 struct qed_ll2_tx_pkt_info pkt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002246 const skb_frag_t *frag;
2247 int rc = -EINVAL, i;
2248 dma_addr_t mapping;
2249 u16 vlan = 0;
2250 u8 flags = 0;
2251
2252 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2253		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2254 return -EINVAL;
2255 }
2256
2257 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2258 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2259 1 + skb_shinfo(skb)->nr_frags);
2260 return -EINVAL;
2261 }
2262
2263 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2264 skb->len, DMA_TO_DEVICE);
2265 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2266 DP_NOTICE(cdev, "SKB mapping failed\n");
2267 return -EINVAL;
2268 }
2269
2270 /* Request HW to calculate IP csum */
2271 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2272 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002273 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002274
2275 if (skb_vlan_tag_present(skb)) {
2276 vlan = skb_vlan_tag_get(skb);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002277 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002278 }
2279
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002280 memset(&pkt, 0, sizeof(pkt));
2281 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2282 pkt.vlan = vlan;
2283 pkt.bd_flags = flags;
2284 pkt.tx_dest = QED_LL2_TX_DEST_NW;
2285 pkt.first_frag = mapping;
2286 pkt.first_frag_len = skb->len;
2287 pkt.cookie = skb;
2288
2289 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2290 &pkt, 1);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002291 if (rc)
2292 goto err;
2293
2294 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2295 frag = &skb_shinfo(skb)->frags[i];
2296 if (!cdev->ll2->frags_mapped) {
2297 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2298 skb_frag_size(frag),
2299 DMA_TO_DEVICE);
2300
2301 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2302 mapping))) {
2303 DP_NOTICE(cdev,
2304 "Unable to map frag - dropping packet\n");
Pan Bian0ff18d22016-12-04 13:53:53 +08002305 rc = -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002306 goto err;
2307 }
2308 } else {
2309 mapping = page_to_phys(skb_frag_page(frag)) |
2310 frag->page_offset;
2311 }
2312
2313 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2314 cdev->ll2->handle,
2315 mapping,
2316 skb_frag_size(frag));
2317
2318		/* If this fails there's not much we can do: a partial packet has
2319		 * been posted and we can't free memory until its completion arrives.
2320		 */
2321 if (rc)
2322 goto err2;
2323 }
2324
2325 return 0;
2326
2327err:
2328 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2329
2330err2:
2331 return rc;
2332}
2333
2334static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2335{
2336 if (!cdev->ll2)
2337 return -EINVAL;
2338
2339 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2340 cdev->ll2->handle, stats);
2341}
2342
2343const struct qed_ll2_ops qed_ll2_ops_pass = {
2344 .start = &qed_ll2_start,
2345 .stop = &qed_ll2_stop,
2346 .start_xmit = &qed_ll2_start_xmit,
2347 .register_cb_ops = &qed_ll2_register_cb_ops,
2348 .get_stats = &qed_ll2_stats,
2349};
2350
2351int qed_ll2_alloc_if(struct qed_dev *cdev)
2352{
2353 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2354 return cdev->ll2 ? 0 : -ENOMEM;
2355}
2356
2357void qed_ll2_dealloc_if(struct qed_dev *cdev)
2358{
2359 kfree(cdev->ll2);
2360 cdev->ll2 = NULL;
2361}
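/* Illustrative consumer-side sketch (editor's addition): how a protocol
 * driver on top of qed would typically drive the ops table exported above,
 * assuming `ops` points at qed_ll2_ops_pass via the qed module interface
 * and `params`, `my_cbs` and `skb` are caller state:
 *
 *	ops->register_cb_ops(cdev, &my_cbs, my_ctx);
 *	if (!ops->start(cdev, &params)) {
 *		ops->start_xmit(cdev, skb);
 *		...
 *		ops->stop(cdev);
 *	}
 */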