/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_rdma.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

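/* Rx completion callback for the protocol drivers' SKB path: replaces the
 * completed buffer with a freshly mapped one (or reuses it on runt packets
 * and allocation failures), wraps the data in an skb and hands it to the
 * registered rx_cb, then re-posts a buffer to the firmware.
 */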
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for the buffer; reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If we need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
	}
}

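/* Tx completion handler registered with the status block: advances past
 * the firmware consumer index, releases the consumed chain elements and
 * invokes tx_comp_cb once per completed packet. The queue spinlock is
 * dropped around the callback so that it may safely re-submit packets.
 */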
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

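/* Translates a single Rx CQE (regular or GSI) into the CQE-type-agnostic
 * qed_ll2_comp_rx_data form, matches it against the head of the active
 * descriptor list and delivers it through rx_comp_cb. The Rx spinlock is
 * released across the callback, hence the p_lock_flags parameter.
 */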
static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

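/* Rx interrupt handler for non-OOO connections: drains the Rx completion
 * queue (RCQ) up to the firmware consumer index and dispatches each CQE
 * to qed_ll2_rxq_handle_completion(); slow-path CQEs are unexpected here.
 */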
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = (union core_rx_cqe_union *)qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
	}
}

static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

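/* Loopback Rx handler for the iSCSI out-of-order (OOO) connection. Each
 * CQE carries an ooo_opaque with the TCP reassembly verdict: any requested
 * isle deletion is applied first, then the completed buffer is added to a
 * new or existing isle, joins two isles, or is queued as ready for
 * transmission toward the in-order stream (the "peninsula").
 */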
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

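/* Re-transmits buffers that the OOO logic has marked ready (now in order):
 * each is sent out through the loopback Tx queue as a single-BD packet,
 * with VLAN/L4 BD flags recovered from the original Rx parse flags.
 */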
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

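/* Tx completion handler for the OOO loopback queue: every completed
 * single-BD packet returns its buffer to the Rx pool and re-posts it to
 * the firmware; once a post fails, the remaining buffers are only freed
 * (b_dont_submit_rx) rather than re-posted.
 */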
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

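/* Sends the CORE_RAMROD_RX_QUEUE_START ramrod over the slow path queue.
 * The ramrod carries the BD/CQE ring addresses, MTU, VLAN-stripping and
 * error-handling policy; OOO connections are not marked as the
 * main-function queue.
 */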
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

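/* Sends the CORE_RAMROD_TX_QUEUE_START ramrod: selects the physical queue
 * (loopback, OOO or offload PQ) from the requested traffic class and maps
 * the connection type to the firmware protocol ID. Tx statistics are
 * disabled for the OOO connection.
 */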
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain, NULL);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

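/* Entry point for acquiring an LL2 connection: claims a free connection
 * slot, copies the caller's request, allocates the Rx/Tx descriptor chains
 * (and OOO buffers when applicable) and registers the matching completion
 * handlers with the interrupt layer. The slot index is returned through
 * data->p_connection_handle.
 */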
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

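/* Activates a previously acquired connection: resets the descriptor chains
 * and lists, acquires a CID, derives the queue/stats IDs plus the
 * Rx-producer and Tx-doorbell addresses, then fires the Rx/Tx queue-start
 * ramrods. FCoE connections additionally get ethertype LLH filters.
 */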
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

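/* Moves any buffers parked on posting_descq onto the active list (together
 * with the optionally supplied packet) and, if anything was moved, writes
 * the new BD/CQE producer values to the TSTORM producer address so the
 * firmware sees them.
 */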
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

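/* Hands an Rx buffer to the connection: claims a free descriptor, fills a
 * BD on the Rx chain and either notifies the firmware immediately or parks
 * the descriptor on posting_descq for a later batched producer update
 * (notify_fw == 0).
 */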
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001556static void
1557qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1558 struct qed_ll2_info *p_ll2,
1559 struct qed_ll2_tx_packet *p_curp,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001560 struct qed_ll2_tx_pkt_info *pkt)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001561{
1562 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1563 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1564 struct core_tx_bd *start_bd = NULL;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001565 enum core_roce_flavor_type roce_flavor;
1566 enum core_tx_dest tx_dest;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001567 u16 bd_data = 0, frag_idx;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001568
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001569 roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
1570 : CORE_RROCE;
1571
1572 tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
1573 : CORE_TX_DEST_LB;
1574
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001575 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001576 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001577 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001578 cpu_to_le16(pkt->l4_hdr_offset_w));
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001579 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001580 bd_data |= pkt->bd_flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001581 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001582 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001583 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1584 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001585 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
1586 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001587
1588 DP_VERBOSE(p_hwfn,
1589 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1590 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1591 p_ll2->queue_id,
1592 p_ll2->cid,
Mintz, Yuval13c54772017-06-09 17:13:20 +03001593 p_ll2->input.conn_type,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001594 prod_idx,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001595 pkt->first_frag_len,
1596 pkt->num_of_bds,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001597 le32_to_cpu(start_bd->addr.hi),
1598 le32_to_cpu(start_bd->addr.lo));
1599
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001600 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001601 return;
1602
1603 /* Need to provide the packet with additional BDs for frags */
1604 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001605 frag_idx < pkt->num_of_bds; frag_idx++) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001606 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1607
1608 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001609 (*p_bd)->bd_data.as_bitfield = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001610 (*p_bd)->bitfield1 = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001611 p_curp->bds_set[frag_idx].tx_frag = 0;
1612 p_curp->bds_set[frag_idx].frag_len = 0;
1613 }
1614}
1615
1616/* This should be called while the Txq spinlock is held */
1617static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1618 struct qed_ll2_info *p_ll2_conn)
1619{
1620 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1621 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1622 struct qed_ll2_tx_packet *p_pkt = NULL;
1623 struct core_db_data db_msg = { 0, 0, 0 };
1624 u16 bd_prod;
1625
1626 /* If there are missing BDs, don't do anything now */
1627 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1628 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1629 return;
1630
1631 /* Push the current packet to the list and clean after it */
1632 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1633 &p_ll2_conn->tx_queue.sending_descq);
1634 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1635 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1636
1637 /* Notify FW of packet only if requested to */
1638 if (!b_notify)
1639 return;
1640
1641 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1642
1643 while (!list_empty(&p_tx->sending_descq)) {
1644 p_pkt = list_first_entry(&p_tx->sending_descq,
1645 struct qed_ll2_tx_packet, list_entry);
1646 if (!p_pkt)
1647 break;
1648
Wei Yongjunb4f0fd42016-10-17 15:17:51 +00001649 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001650 }
1651
1652 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1653 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1654 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1655 DQ_XCM_CORE_TX_BD_PROD_CMD);
1656 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1657 db_msg.spq_prod = cpu_to_le16(bd_prod);
1658
1659	/* Make sure the BD data is updated before ringing the doorbell */
1660 wmb();
1661
1662 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1663
1664 DP_VERBOSE(p_hwfn,
1665 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1666 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1667 p_ll2_conn->queue_id,
Mintz, Yuval13c54772017-06-09 17:13:20 +03001668 p_ll2_conn->cid,
1669 p_ll2_conn->input.conn_type, db_msg.spq_prod);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001670}
1671
Michal Kalderon0518c122017-06-09 17:13:22 +03001672int qed_ll2_prepare_tx_packet(void *cxt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001673 u8 connection_handle,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001674 struct qed_ll2_tx_pkt_info *pkt,
1675 bool notify_fw)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001676{
Michal Kalderon0518c122017-06-09 17:13:22 +03001677 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001678 struct qed_ll2_tx_packet *p_curp = NULL;
1679 struct qed_ll2_info *p_ll2_conn = NULL;
1680 struct qed_ll2_tx_queue *p_tx;
1681 struct qed_chain *p_tx_chain;
1682 unsigned long flags;
1683 int rc = 0;
1684
1685 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1686 if (!p_ll2_conn)
1687 return -EINVAL;
1688 p_tx = &p_ll2_conn->tx_queue;
1689 p_tx_chain = &p_tx->txq_chain;
1690
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001691 if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001692 return -EIO;
1693
1694 spin_lock_irqsave(&p_tx->lock, flags);
1695 if (p_tx->cur_send_packet) {
1696 rc = -EEXIST;
1697 goto out;
1698 }
1699
1700 /* Get entry, but only if we have tx elements for it */
1701 if (!list_empty(&p_tx->free_descq))
1702 p_curp = list_first_entry(&p_tx->free_descq,
1703 struct qed_ll2_tx_packet, list_entry);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001704 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001705 p_curp = NULL;
1706
1707 if (!p_curp) {
1708 rc = -EBUSY;
1709 goto out;
1710 }
1711
1712 /* Prepare packet and BD, and perhaps send a doorbell to FW */
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001713 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
1714
1715 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001716
1717 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1718
1719out:
1720 spin_unlock_irqrestore(&p_tx->lock, flags);
1721 return rc;
1722}
1723
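/* Usage sketch (illustrative): transmitting a packet built of two
 * fragments, mirroring what qed_ll2_start_xmit() does further down. The
 * first fragment rides along with the prepare call; every further BD is
 * supplied through qed_ll2_set_fragment_of_tx_packet(). The DMA
 * addresses and lengths are assumed to be valid, caller-owned mappings:
 *
 *	struct qed_ll2_tx_pkt_info pkt;
 *
 *	memset(&pkt, 0, sizeof(pkt));
 *	pkt.num_of_bds = 2;
 *	pkt.tx_dest = QED_LL2_TX_DEST_NW;
 *	pkt.first_frag = frag0_phys;
 *	pkt.first_frag_len = frag0_len;
 *	pkt.cookie = my_cookie;
 *
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
 *	if (!rc)
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frag1_phys, frag1_len);
 */
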
Michal Kalderon0518c122017-06-09 17:13:22 +03001724int qed_ll2_set_fragment_of_tx_packet(void *cxt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001725 u8 connection_handle,
1726 dma_addr_t addr, u16 nbytes)
1727{
1728 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
Michal Kalderon0518c122017-06-09 17:13:22 +03001729 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001730 struct qed_ll2_info *p_ll2_conn = NULL;
1731 u16 cur_send_frag_num = 0;
1732 struct core_tx_bd *p_bd;
1733 unsigned long flags;
1734
1735 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1736 if (!p_ll2_conn)
1737 return -EINVAL;
1738
1739 if (!p_ll2_conn->tx_queue.cur_send_packet)
1740 return -EINVAL;
1741
1742 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1743 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1744
1745 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1746 return -EINVAL;
1747
1748 /* Fill the BD information, and possibly notify FW */
1749 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1750 DMA_REGPAIR_LE(p_bd->addr, addr);
1751 p_bd->nbytes = cpu_to_le16(nbytes);
1752 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1753 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1754
1755 p_ll2_conn->tx_queue.cur_send_frag_num++;
1756
1757 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1758 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1759 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1760
1761 return 0;
1762}
1763
Michal Kalderon0518c122017-06-09 17:13:22 +03001764int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001765{
Michal Kalderon0518c122017-06-09 17:13:22 +03001766 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001767 struct qed_ll2_info *p_ll2_conn = NULL;
1768 int rc = -EINVAL;
Rahul Verma15582962017-04-06 15:58:29 +03001769 struct qed_ptt *p_ptt;
1770
1771 p_ptt = qed_ptt_acquire(p_hwfn);
1772 if (!p_ptt)
1773 return -EAGAIN;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001774
1775 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
Rahul Verma15582962017-04-06 15:58:29 +03001776 if (!p_ll2_conn) {
1777 rc = -EINVAL;
1778 goto out;
1779 }
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001780
1781 /* Stop Tx & Rx of connection, if needed */
1782 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1783 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1784 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001785 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001786 qed_ll2_txq_flush(p_hwfn, connection_handle);
1787 }
1788
1789 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1790 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1791 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001792 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001793 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1794 }
1795
Kalderon, Michal526d1d02017-07-02 10:29:23 +03001796 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001797 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1798
Mintz, Yuval13c54772017-06-09 17:13:20 +03001799 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
Rahul Verma15582962017-04-06 15:58:29 +03001800 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001801 0x8906, 0,
1802 QED_LLH_FILTER_ETHERTYPE);
Rahul Verma15582962017-04-06 15:58:29 +03001803 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001804 0x8914, 0,
1805 QED_LLH_FILTER_ETHERTYPE);
1806 }
1807
Rahul Verma15582962017-04-06 15:58:29 +03001808out:
1809 qed_ptt_release(p_hwfn, p_ptt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001810 return rc;
1811}
1812
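/* Teardown sketch (illustrative): this mirrors what qed_ll2_stop() does
 * further down - quiesce the Tx/Rx queues first, then free the
 * connection's resources:
 *
 *	qed_ll2_terminate_connection(p_hwfn, handle);
 *	qed_ll2_release_connection(p_hwfn, handle);
 */
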
Mintz, Yuval58de2892017-06-09 17:13:21 +03001813static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
1814 struct qed_ll2_info *p_ll2_conn)
1815{
1816 struct qed_ooo_buffer *p_buffer;
1817
Kalderon, Michal526d1d02017-07-02 10:29:23 +03001818 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
Mintz, Yuval58de2892017-06-09 17:13:21 +03001819 return;
1820
1821 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1822 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
1823 p_hwfn->p_ooo_info))) {
1824 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1825 p_buffer->rx_buffer_size,
1826 p_buffer->rx_buffer_virt_addr,
1827 p_buffer->rx_buffer_phys_addr);
1828 kfree(p_buffer);
1829 }
1830}
Michal Kalderon0518c122017-06-09 17:13:22 +03001831
1832void qed_ll2_release_connection(void *cxt, u8 connection_handle)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001833{
Michal Kalderon0518c122017-06-09 17:13:22 +03001834 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001835 struct qed_ll2_info *p_ll2_conn = NULL;
1836
1837 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1838 if (!p_ll2_conn)
1839 return;
1840
1841 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1842 p_ll2_conn->rx_queue.b_cb_registred = false;
1843 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1844 }
1845
1846 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1847 p_ll2_conn->tx_queue.b_cb_registred = false;
1848 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1849 }
1850
1851 kfree(p_ll2_conn->tx_queue.descq_array);
1852 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1853
1854 kfree(p_ll2_conn->rx_queue.descq_array);
1855 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1856 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1857
1858 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1859
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001860 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1861
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001862 mutex_lock(&p_ll2_conn->mutex);
1863 p_ll2_conn->b_active = false;
1864 mutex_unlock(&p_ll2_conn->mutex);
1865}
1866
Tomer Tayar3587cb82017-05-21 12:10:56 +03001867int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001868{
1869 struct qed_ll2_info *p_ll2_connections;
1870 u8 i;
1871
1872 /* Allocate LL2's set struct */
1873 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1874 sizeof(struct qed_ll2_info), GFP_KERNEL);
1875 if (!p_ll2_connections) {
1876		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2_info'\n");
Tomer Tayar3587cb82017-05-21 12:10:56 +03001877 return -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001878 }
1879
1880 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1881 p_ll2_connections[i].my_id = i;
1882
Tomer Tayar3587cb82017-05-21 12:10:56 +03001883 p_hwfn->p_ll2_info = p_ll2_connections;
1884 return 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001885}
1886
Tomer Tayar3587cb82017-05-21 12:10:56 +03001887void qed_ll2_setup(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001888{
1889 int i;
1890
1891 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
Tomer Tayar3587cb82017-05-21 12:10:56 +03001892 mutex_init(&p_hwfn->p_ll2_info[i].mutex);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001893}
1894
Tomer Tayar3587cb82017-05-21 12:10:56 +03001895void qed_ll2_free(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001896{
Tomer Tayar3587cb82017-05-21 12:10:56 +03001897 if (!p_hwfn->p_ll2_info)
1898 return;
1899
1900 kfree(p_hwfn->p_ll2_info);
1901 p_hwfn->p_ll2_info = NULL;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001902}
1903
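/* Lifecycle sketch (illustrative): the three helpers above are expected
 * to be driven from the hwfn init/teardown path in this order:
 *
 *	rc = qed_ll2_alloc(p_hwfn);
 *	if (!rc)
 *		qed_ll2_setup(p_hwfn);
 *	...
 *	qed_ll2_free(p_hwfn);
 */
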
Mintz, Yuvalfef1c3f2017-06-09 17:13:25 +03001904static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
1905 struct qed_ptt *p_ptt,
1906 struct qed_ll2_stats *p_stats)
1907{
1908 struct core_ll2_port_stats port_stats;
1909
1910 memset(&port_stats, 0, sizeof(port_stats));
1911 qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1912 BAR0_MAP_REG_TSDM_RAM +
1913 TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
1914 sizeof(port_stats));
1915
1916 p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
1917 p_stats->gsi_invalid_pkt_length =
1918 HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
1919 p_stats->gsi_unsupported_pkt_typ =
1920 HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
1921 p_stats->gsi_crcchksm_error =
1922 HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
1923}
1924
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001925static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1926 struct qed_ptt *p_ptt,
1927 struct qed_ll2_info *p_ll2_conn,
1928 struct qed_ll2_stats *p_stats)
1929{
1930 struct core_ll2_tstorm_per_queue_stat tstats;
1931 u8 qid = p_ll2_conn->queue_id;
1932 u32 tstats_addr;
1933
1934 memset(&tstats, 0, sizeof(tstats));
1935 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1936 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1937 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1938
1939 p_stats->packet_too_big_discard =
1940 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1941 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1942}
1943
1944static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1945 struct qed_ptt *p_ptt,
1946 struct qed_ll2_info *p_ll2_conn,
1947 struct qed_ll2_stats *p_stats)
1948{
1949 struct core_ll2_ustorm_per_queue_stat ustats;
1950 u8 qid = p_ll2_conn->queue_id;
1951 u32 ustats_addr;
1952
1953 memset(&ustats, 0, sizeof(ustats));
1954 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1955 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1956 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1957
1958 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1959 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1960 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1961 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1962 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1963 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1964}
1965
1966static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1967 struct qed_ptt *p_ptt,
1968 struct qed_ll2_info *p_ll2_conn,
1969 struct qed_ll2_stats *p_stats)
1970{
1971 struct core_ll2_pstorm_per_queue_stat pstats;
1972 u8 stats_id = p_ll2_conn->tx_stats_id;
1973 u32 pstats_addr;
1974
1975 memset(&pstats, 0, sizeof(pstats));
1976 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1977 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1978 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1979
1980 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1981 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1982 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1983 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1984 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1985 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1986}
1987
Michal Kalderon0518c122017-06-09 17:13:22 +03001988int qed_ll2_get_stats(void *cxt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001989 u8 connection_handle, struct qed_ll2_stats *p_stats)
1990{
Michal Kalderon0518c122017-06-09 17:13:22 +03001991 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001992 struct qed_ll2_info *p_ll2_conn = NULL;
1993 struct qed_ptt *p_ptt;
1994
1995 memset(p_stats, 0, sizeof(*p_stats));
1996
1997 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1998 !p_hwfn->p_ll2_info)
1999 return -EINVAL;
2000
2001 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2002
2003 p_ptt = qed_ptt_acquire(p_hwfn);
2004 if (!p_ptt) {
2005 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2006 return -EINVAL;
2007 }
2008
Mintz, Yuvalfef1c3f2017-06-09 17:13:25 +03002009 if (p_ll2_conn->input.gsi_enable)
2010 _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002011 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2012 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2013 if (p_ll2_conn->tx_stats_en)
2014 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2015
2016 qed_ptt_release(p_hwfn, p_ptt);
2017 return 0;
2018}
2019
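/* Usage sketch (illustrative): the snapshot is zeroed up front, so stat
 * blocks that are skipped (e.g. pstats when tx_stats_en is clear, or the
 * GSI port stats when gsi_enable is clear) simply read back as zero:
 *
 *	struct qed_ll2_stats stats;
 *
 *	if (!qed_ll2_get_stats(p_hwfn, handle, &stats))
 *		pr_info("rx ucast pkts: %llu\n", stats.rcv_ucast_pkts);
 */
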
Michal Kalderon0518c122017-06-09 17:13:22 +03002020static void qed_ll2b_release_rx_packet(void *cxt,
2021 u8 connection_handle,
2022 void *cookie,
2023 dma_addr_t rx_buf_addr,
2024 bool b_last_packet)
2025{
2026 struct qed_hwfn *p_hwfn = cxt;
2027
2028 qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
2029}
2030
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002031static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2032 const struct qed_ll2_cb_ops *ops,
2033 void *cookie)
2034{
2035 cdev->ll2->cbs = ops;
2036 cdev->ll2->cb_cookie = cookie;
2037}
2038
Michal Kalderon0518c122017-06-09 17:13:22 +03002039struct qed_ll2_cbs ll2_cbs = {
2040 .rx_comp_cb = &qed_ll2b_complete_rx_packet,
2041 .rx_release_cb = &qed_ll2b_release_rx_packet,
2042 .tx_comp_cb = &qed_ll2b_complete_tx_packet,
2043 .tx_release_cb = &qed_ll2b_complete_tx_packet,
2044};
2045
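/* Sketch (illustrative): a protocol client wanting its own completion
 * handling would point qed_ll2_acquire_data at a qed_ll2_cbs of its own
 * instead of the default ll2_cbs above; the 'my_*' names are
 * hypothetical and must match the qed_ll2_cbs prototypes:
 *
 *	struct qed_ll2_cbs my_cbs = {
 *		.rx_comp_cb = my_rx_comp,
 *		.rx_release_cb = my_rx_release,
 *		.tx_comp_cb = my_tx_comp,
 *		.tx_release_cb = my_tx_release,
 *		.cookie = my_ctx,
 *	};
 *
 *	data.cbs = &my_cbs;
 */
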
Mintz, Yuval13c54772017-06-09 17:13:20 +03002046static void qed_ll2_set_conn_data(struct qed_dev *cdev,
2047 struct qed_ll2_acquire_data *data,
2048 struct qed_ll2_params *params,
2049 enum qed_ll2_conn_type conn_type,
Michal Kalderon0518c122017-06-09 17:13:22 +03002050 u8 *handle, bool lb)
Mintz, Yuval13c54772017-06-09 17:13:20 +03002051{
2052 memset(data, 0, sizeof(*data));
2053
2054 data->input.conn_type = conn_type;
2055 data->input.mtu = params->mtu;
2056 data->input.rx_num_desc = QED_LL2_RX_SIZE;
2057 data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2058 data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
2059 data->input.tx_num_desc = QED_LL2_TX_SIZE;
Mintz, Yuval13c54772017-06-09 17:13:20 +03002060 data->p_connection_handle = handle;
Michal Kalderon0518c122017-06-09 17:13:22 +03002061 data->cbs = &ll2_cbs;
2062 ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
2063
Mintz, Yuval13c54772017-06-09 17:13:20 +03002064 if (lb) {
Kalderon, Michal526d1d02017-07-02 10:29:23 +03002065 data->input.tx_tc = PKT_LB_TC;
Mintz, Yuval13c54772017-06-09 17:13:20 +03002066 data->input.tx_dest = QED_LL2_TX_DEST_LB;
2067 } else {
2068 data->input.tx_tc = 0;
2069 data->input.tx_dest = QED_LL2_TX_DEST_NW;
2070 }
2071}
2072
2073static int qed_ll2_start_ooo(struct qed_dev *cdev,
2074 struct qed_ll2_params *params)
2075{
2076 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2077 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
2078 struct qed_ll2_acquire_data data;
2079 int rc;
2080
2081 qed_ll2_set_conn_data(cdev, &data, params,
Kalderon, Michal526d1d02017-07-02 10:29:23 +03002082 QED_LL2_TYPE_OOO, handle, true);
Mintz, Yuval13c54772017-06-09 17:13:20 +03002083
2084 rc = qed_ll2_acquire_connection(hwfn, &data);
2085 if (rc) {
2086 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
2087 goto out;
2088 }
2089
2090 rc = qed_ll2_establish_connection(hwfn, *handle);
2091 if (rc) {
2092		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
2093 goto fail;
2094 }
2095
2096 return 0;
2097
2098fail:
2099 qed_ll2_release_connection(hwfn, *handle);
2100out:
2101 *handle = QED_LL2_UNUSED_HANDLE;
2102 return rc;
2103}
2104
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002105static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2106{
Wei Yongjun88a24282016-10-10 14:08:28 +00002107 struct qed_ll2_buffer *buffer, *tmp_buffer;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002108 enum qed_ll2_conn_type conn_type;
Mintz, Yuval13c54772017-06-09 17:13:20 +03002109 struct qed_ll2_acquire_data data;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002110 struct qed_ptt *p_ptt;
2111 int rc, i;
Michal Kalderon0518c122017-06-09 17:13:22 +03002112
2114 /* Initialize LL2 locks & lists */
2115 INIT_LIST_HEAD(&cdev->ll2->list);
2116 spin_lock_init(&cdev->ll2->lock);
2117 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2118 L1_CACHE_BYTES + params->mtu;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002119
2120	/* Allocate memory for LL2 */
2121 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2122 cdev->ll2->rx_size);
2123 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2124 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2125 if (!buffer) {
2126 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2127 goto fail;
2128 }
2129
2130 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2131 &buffer->phys_addr);
2132 if (rc) {
2133 kfree(buffer);
2134 goto fail;
2135 }
2136
2137 list_add_tail(&buffer->list, &cdev->ll2->list);
2138 }
2139
2140 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002141 case QED_PCI_FCOE:
2142 conn_type = QED_LL2_TYPE_FCOE;
Arun Easi1e128c82017-02-15 06:28:22 -08002143 break;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002144 case QED_PCI_ISCSI:
2145 conn_type = QED_LL2_TYPE_ISCSI;
2146 break;
2147 case QED_PCI_ETH_ROCE:
2148 conn_type = QED_LL2_TYPE_ROCE;
2149 break;
2150 default:
2151 conn_type = QED_LL2_TYPE_TEST;
2152 }
2153
Mintz, Yuval13c54772017-06-09 17:13:20 +03002154 qed_ll2_set_conn_data(cdev, &data, params, conn_type,
Michal Kalderon0518c122017-06-09 17:13:22 +03002155 &cdev->ll2->handle, false);
Arnd Bergmann0629a332017-01-18 15:52:52 +01002156
Mintz, Yuval13c54772017-06-09 17:13:20 +03002157 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002158 if (rc) {
2159 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2160 goto fail;
2161 }
2162
2163 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2164 cdev->ll2->handle);
2165 if (rc) {
2166 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2167 goto release_fail;
2168 }
2169
2170 /* Post all Rx buffers to FW */
2171 spin_lock_bh(&cdev->ll2->lock);
Wei Yongjun88a24282016-10-10 14:08:28 +00002172 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002173 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2174 cdev->ll2->handle,
2175 buffer->phys_addr, 0, buffer, 1);
2176 if (rc) {
2177 DP_INFO(cdev,
2178 "Failed to post an Rx buffer; Deleting it\n");
2179 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2180 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2181 kfree(buffer->data);
2182 list_del(&buffer->list);
2183 kfree(buffer);
2184 } else {
2185 cdev->ll2->rx_cnt++;
2186 }
2187 }
2188 spin_unlock_bh(&cdev->ll2->lock);
2189
2190 if (!cdev->ll2->rx_cnt) {
2191 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2192 goto release_terminate;
2193 }
2194
2195 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2196 DP_INFO(cdev, "Invalid Ethernet address\n");
2197 goto release_terminate;
2198 }
2199
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002200 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2201 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2202 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2203 rc = qed_ll2_start_ooo(cdev, params);
2204 if (rc) {
2205 DP_INFO(cdev,
2206 "Failed to initialize the OOO LL2 queue\n");
2207 goto release_terminate;
2208 }
2209 }
2210
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002211 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2212 if (!p_ptt) {
2213 DP_INFO(cdev, "Failed to acquire PTT\n");
2214 goto release_terminate;
2215 }
2216
2217 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2218 params->ll2_mac_address);
2219 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2220 if (rc) {
2221 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2222 goto release_terminate_all;
2223 }
2224
2225 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002226 return 0;
2227
2228release_terminate_all:
2229
2230release_terminate:
2231 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2232release_fail:
2233 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2234fail:
2235 qed_ll2_kill_buffers(cdev);
2236 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2237 return -EINVAL;
2238}
2239
2240static int qed_ll2_stop(struct qed_dev *cdev)
2241{
2242 struct qed_ptt *p_ptt;
2243 int rc;
2244
2245 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2246 return 0;
2247
2248 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2249 if (!p_ptt) {
2250 DP_INFO(cdev, "Failed to acquire PTT\n");
2251 goto fail;
2252 }
2253
2254 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2255 cdev->ll2_mac_address);
2256 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2257 eth_zero_addr(cdev->ll2_mac_address);
2258
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002259 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2260 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2261 qed_ll2_stop_ooo(cdev);
2262
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002263 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2264 cdev->ll2->handle);
2265 if (rc)
2266 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2267
2268 qed_ll2_kill_buffers(cdev);
2269
2270 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2271 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2272
2273 return rc;
2274fail:
2275 return -EINVAL;
2276}
2277
2278static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2279{
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002280 struct qed_ll2_tx_pkt_info pkt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002281 const skb_frag_t *frag;
2282 int rc = -EINVAL, i;
2283 dma_addr_t mapping;
2284 u16 vlan = 0;
2285 u8 flags = 0;
2286
2287 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2288		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2289 return -EINVAL;
2290 }
2291
2292 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2293 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2294 1 + skb_shinfo(skb)->nr_frags);
2295 return -EINVAL;
2296 }
2297
2298 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2299 skb->len, DMA_TO_DEVICE);
2300 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2301 DP_NOTICE(cdev, "SKB mapping failed\n");
2302 return -EINVAL;
2303 }
2304
2305 /* Request HW to calculate IP csum */
2306 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2307 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002308 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002309
2310 if (skb_vlan_tag_present(skb)) {
2311 vlan = skb_vlan_tag_get(skb);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002312 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002313 }
2314
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002315 memset(&pkt, 0, sizeof(pkt));
2316 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2317 pkt.vlan = vlan;
2318 pkt.bd_flags = flags;
2319 pkt.tx_dest = QED_LL2_TX_DEST_NW;
2320 pkt.first_frag = mapping;
2321 pkt.first_frag_len = skb->len;
2322 pkt.cookie = skb;
2323
2324 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2325 &pkt, 1);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002326 if (rc)
2327 goto err;
2328
2329 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2330 frag = &skb_shinfo(skb)->frags[i];
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002331
Mintz, Yuvald2201a22017-06-09 17:13:23 +03002332 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2333 skb_frag_size(frag), DMA_TO_DEVICE);
2334
2335 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2336 DP_NOTICE(cdev,
2337 "Unable to map frag - dropping packet\n");
2338 goto err;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002339 }
2340
2341 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2342 cdev->ll2->handle,
2343 mapping,
2344 skb_frag_size(frag));
2345
2346		/* If this fails there is not much we can do: part of the packet
2347		 * was already posted and we can't free memory until completion.
2348		 */
2349 if (rc)
2350 goto err2;
2351 }
2352
2353 return 0;
2354
2355err:
2356 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2357
2358err2:
2359 return rc;
2360}
2361
2362static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2363{
2364 if (!cdev->ll2)
2365 return -EINVAL;
2366
2367 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2368 cdev->ll2->handle, stats);
2369}
2370
2371const struct qed_ll2_ops qed_ll2_ops_pass = {
2372 .start = &qed_ll2_start,
2373 .stop = &qed_ll2_stop,
2374 .start_xmit = &qed_ll2_start_xmit,
2375 .register_cb_ops = &qed_ll2_register_cb_ops,
2376 .get_stats = &qed_ll2_stats,
2377};
2378
2379int qed_ll2_alloc_if(struct qed_dev *cdev)
2380{
2381 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2382 return cdev->ll2 ? 0 : -ENOMEM;
2383}
2384
2385void qed_ll2_dealloc_if(struct qed_dev *cdev)
2386{
2387 kfree(cdev->ll2);
2388 cdev->ll2 = NULL;
2389}