/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_rdma.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

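/* Tx-completion callback used by the qed_ll2_cb_ops path: releases the DMA
 * mapping of the first fragment, notifies the upper layer's tx_cb (if one
 * was registered) and frees the skb that was stashed in the packet cookie.
 */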
static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}

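/* Allocates and DMA-maps a single Rx buffer of cdev->ll2->rx_size bytes.
 * The mapping deliberately skips the first NET_SKB_PAD bytes so the buffer
 * can later be handed to build_skb() with its headroom intact.
 */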
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

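/* Rx-completion callback for the main LL2 queue: tries to allocate a
 * replacement buffer, wraps the completed one in an skb and hands it to the
 * protocol driver's rx_cb, then reposts a buffer to the firmware. On any
 * allocation failure, or for a runt shorter than ETH_HLEN, the original
 * buffer is simply reposted and the packet is dropped.
 */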
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

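/* Translates a connection handle into its qed_ll2_info entry, optionally
 * taking the connection mutex and optionally filtering out connections that
 * are not currently active. The thin wrappers below fix the lock/active
 * combinations used throughout the rest of this file.
 */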
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

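/* Drains every Tx descriptor still on the active list when a connection is
 * being torn down. OOO buffers are returned to the free pool; for all other
 * connection types the owner's tx_release_cb is invoked outside the queue
 * spinlock, which is why the lock is dropped and re-taken per packet.
 */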
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
		spin_lock_irqsave(&p_tx->lock, flags);
	}
	spin_unlock_irqrestore(&p_tx->lock, flags);
}

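/* Interrupt-context Tx completion handler for regular (non-OOO) connections:
 * walks the BDs the firmware has consumed since the last run, matches them
 * against the active descriptor list and invokes tx_comp_cb once per
 * completed packet with the queue spinlock temporarily released.
 */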
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

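/* The two parsers below unpack a firmware Rx CQE into the byte-order
 * neutral qed_ll2_comp_rx_data structure; one handles GSI (RoCE offload)
 * CQEs, the other regular fast-path CQEs.
 */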
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);

	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
		DP_NOTICE(p_hwfn,
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);
		return -EINVAL;
	}

	if (!p_ll2_conn->cbs.slowpath_cb) {
		DP_NOTICE(p_hwfn,
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
		return -EINVAL;
	}

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    p_ll2_conn->my_id,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

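/* Handles a single regular/GSI Rx CQE: pops the matching descriptor from
 * the active list, fills a qed_ll2_comp_rx_data from the CQE and delivers
 * it through rx_comp_cb with the Rx spinlock dropped for the duration of
 * the callback.
 */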
static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

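/* Top-level Rx interrupt handler for regular connections: consumes CQEs
 * from the Rx completion queue until the driver's consumer index catches up
 * with the firmware producer, dispatching each CQE by type.
 */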
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = (union core_rx_cqe_union *)
		      qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
						     cqe, &flags);
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;
		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
		spin_lock_irqsave(&p_rx->lock, flags);
	}
	spin_unlock_irqrestore(&p_rx->lock, flags);
}

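/* Loopback (OOO) slowpath CQE handler: a RX_QUEUE_FLUSH ramrod carrying a
 * TCP_EVENT_DELETE_ISLES opcode causes all isles of the connection to be
 * released. Returns true when the CQE was fully handled here.
 */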
static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
				struct core_rx_slow_path_cqe *p_cqe)
{
	struct ooo_opaque *iscsi_ooo;
	u32 cid;

	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
		return false;

	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
		return false;

	/* Need to make a flush */
	cid = le32_to_cpu(iscsi_ooo->cid);
	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);

	return true;
}

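/* Rx handler for the loopback queue used by the out-of-order (OOO) TCP
 * offload of iSCSI/iWARP. Each fast-path CQE carries an ooo_opaque verdict
 * from firmware; depending on the opcode, the completed buffer is attached
 * to an "isle" of out-of-order data or queued as ready for retransmission.
 */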
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
							    &cqe->rx_cqe_sp))
				continue;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

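/* Re-injects buffers that the OOO logic marked as ready: each one is sent
 * as a single-BD LL2 Tx packet toward the connection's tx_dest. On a
 * prepare failure the buffer is pushed back to the head of the ready list
 * and submission stops until the next completion.
 */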
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

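/* Tears down the dedicated OOO connection tracked in iscsi_pf_params and
 * marks its handle as unused again.
 */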
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

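/* Posts the CORE_RAMROD_RX_QUEUE_START slowpath ramrod that hands the Rx BD
 * chain and CQE PBL over to firmware, along with the connection's MTU,
 * VLAN-stripping and error-handling policy. The multi-function mode bits
 * decide whether broadcast/multicast are accepted on the main queue.
 */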
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_stripping_en =
		p_ll2_conn->input.rx_vlan_removal_en;

	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
		p_ramrod->report_outer_vlan = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;

	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
	    conn_type != QED_LL2_TYPE_IWARP) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			p_ramrod->conn_type = PROTOCOLID_ISCSI;
		else
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

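/* Allocates the Rx side of a connection: a NEXT_PTR chain of Rx BDs, the
 * descriptor shadow array and a PBL-backed completion-queue chain, all
 * sized by the caller-supplied rx_num_desc.
 */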
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 desc_size;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain, NULL);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_descq) +
		     (p_ll2_info->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_descq->bds_set));

	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_mem = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

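/* Public entry point for claiming one of the QED_MAX_NUM_OF_LL2_CONNECTIONS
 * slots: copies the caller's input, validates the callbacks, allocates the
 * Rx/Tx (and, for OOO, buffer) resources and registers the matching
 * interrupt completion handlers. On success the slot index is returned
 * through data->p_connection_handle. Typical usage by a protocol driver is
 * acquire -> establish -> post Rx buffers / send Tx packets -> terminate ->
 * release.
 */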
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	switch (data->input.tx_dest) {
	case QED_LL2_TX_DEST_NW:
		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		return -EINVAL;
	}

	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
	    data->input.secondary_queue)
		p_ll2_info->main_func_queue = false;
	else
		p_ll2_info->main_func_queue = true;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

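/* Second stage of bringing up a connection: resets the chains and
 * descriptor lists, acquires a CID, derives the producer/doorbell addresses
 * and fires the Rx/Tx queue-start ramrods. For FCoE connections, LLH
 * protocol filters for the FCoE and FIP ethertypes are also installed.
 */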
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_packet *p_pkt;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u32 desc_size;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_pkt) +
		     (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_pkt->bds_set));

	for (i = 0; i < capacity; i++) {
		p_pkt = p_tx->descq_mem + desc_size * i;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
	}
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_add_protocol_filter(p_hwfn, p_ptt,
						    ETH_P_FCOE, 0,
						    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    ETH_P_FIP, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

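/* Move pending buffers (and the optional current one) to the active list
 * and update the firmware's Rx BD/CQE producers. Must be called with the
 * Rx queue spinlock held.
 */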
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

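/* Post a single Rx buffer on a connection. When notify_fw is clear the
 * buffer is only queued locally and handed to firmware by a later call;
 * -EBUSY means no free descriptor or chain element was available.
 */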
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

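/* Bind a free Tx descriptor to the packet being sent and record its first
 * fragment; further fragments arrive via qed_ll2_set_fragment_of_tx_packet().
 */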
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

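/* Build the first Tx BD (destination, VLAN/loopback echo, checksum and
 * RoCE-flavor flags) and produce empty BDs for the remaining fragments.
 */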
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	switch (pkt->tx_dest) {
	case QED_LL2_TX_DEST_NW:
		tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		tx_dest = CORE_TX_DEST_LB;
		break;
	}

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
		start_bd->nw_vlan_or_lb_echo =
		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
	} else {
		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
			pkt->remove_stag = true;
	}

	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
		  !!(pkt->remove_stag));

	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, db_msg.spq_prod);
}

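/* Reserve a Tx descriptor and enough chain elements for the whole packet,
 * build its BDs and, once all fragments are present, ring the doorbell.
 */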
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

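/* Supply one additional fragment for the Tx packet currently being
 * prepared; firmware is notified once the expected number of BDs is set.
 */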
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

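/* Stop the Tx/Rx queues of an established connection and flush their
 * descriptors. The connection stays acquired and is torn down fully by
 * qed_ll2_release_connection().
 */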
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
						       ETH_P_FCOE, 0,
						       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       ETH_P_FIP, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

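/* Free the preallocated OOO Rx buffers when an OOO connection is being
 * released; no-op for every other connection type.
 */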
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

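/* Undo qed_ll2_acquire_connection(): unregister the SB callbacks, free
 * the chains and descriptor memory, release the CID and mark the handle
 * inactive.
 */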
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

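/* Lifetime of the per-hwfn LL2 connection array: allocated once at init,
 * per-connection mutexes initialized during setup, freed at teardown.
 */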
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;
	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

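/* The helpers below copy firmware-maintained statistics out of the
 * Tstorm/Ustorm/Pstorm RAM sections for a given queue (and, for GSI,
 * for the whole port).
 */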
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length =
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ =
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error =
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

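/* Collect a connection's statistics: per-port GSI counters when GSI is
 * enabled, Rx discard and receive counters always, and Tx counters when
 * Tx statistics are enabled for the connection.
 */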
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

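/* Translate the module-level qed_ll2_params into acquire-time input for a
 * new connection; loopback (lb) connections use the loopback TC and are
 * directed back to the Rx path instead of the network.
 */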
static void qed_ll2_set_conn_data(struct qed_dev *cdev,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

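/* Acquire and establish the dedicated OOO connection used for
 * out-of-order Rx placement; its handle is kept in the iSCSI PF params.
 */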
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(hwfn, &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

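/* Protocol-driver entry point: allocate and post the Rx buffer pool,
 * acquire and establish a connection matching the device personality,
 * start the OOO queue for iSCSI and install the LL2 MAC filter.
 */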
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(cdev, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

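/* Reverse of qed_ll2_start(): remove the MAC filter, stop the OOO queue
 * for iSCSI, terminate and release the connection and free the buffers.
 */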
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

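/* Protocol-driver transmit entry point: map the skb head and all of its
 * fragments and submit them as a single LL2 Tx packet. Packets that
 * require checksum offload are rejected.
 */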
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is not much we can do: a partial packet
		 * has already been posted and we cannot free its memory, so
		 * wait for the completion to reclaim it.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}