/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in non-sleeping contexts */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

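/* Tx completion callback used on the internal buffer-management path
 * (qed_cb_ll2_info): unmap the first fragment, notify the upper-layer
 * driver via its registered tx_cb, and free the skb.
 */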
static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

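/* Allocate and DMA-map one Rx buffer of cdev->ll2->rx_size bytes; the
 * mapping starts NET_SKB_PAD into the allocation, leaving headroom for
 * the skb built on the completion path.
 */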
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

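/* Rx completion for the internal buffer-management path: allocate a
 * replacement buffer (or repost the old one for runt packets and on
 * allocation failure), wrap the received data in an skb and hand it to
 * the registered rx_cb.
 */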
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for the buffer; reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If we need to reuse the buffer or there's no replacement, repost it */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

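/* Translate a connection handle into its qed_ll2_info slot. Callers pick
 * the locked/unlocked and active-only variants via the wrappers below.
 */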
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
	}
}

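/* Tx completion handler: walk completed BDs up to the firmware consumer
 * index, releasing whole packets. The queue lock is dropped around the
 * upper-layer completion callback and re-taken afterwards.
 */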
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

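/* Rx interrupt handler: drain the Rx completion queue (RCQ) between the
 * driver's consumer index and the firmware's, dispatching each CQE by type.
 */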
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

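/* Loopback Rx handler for the iSCSI out-of-order (OOO) connection: each
 * CQE carries an ooo_opaque verdict from firmware describing how the
 * buffer joins the per-CID isle bookkeeping (new isle, extend left/right,
 * join two isles, or add to the peninsula, i.e. ready for in-order
 * transmission).
 */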
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

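/* Re-transmit buffers that qed_ooo marked ready (in-order) through the
 * LL2 Tx queue; on a prepare failure the buffer is returned to the ready
 * list and submission stops.
 */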
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

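/* Tear down the dedicated OOO LL2 queue on the leading hwfn and mark its
 * handle unused.
 */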
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

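/* Slow-path ramrods: the CORE_RAMROD_{RX,TX}_QUEUE_{START,STOP} requests
 * below are posted on the slow-path queue (SPQ) and block until firmware
 * acknowledges them (QED_SPQ_MODE_EBLOCK).
 */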
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

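/* Allocate the Rx BD chain, its shadow descriptor array and the Rx
 * completion (RCQ) chain for a connection, sized by input.rx_num_desc.
 */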
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

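/* Public entry point: claim a free connection slot, copy the caller's
 * input, allocate the Rx/Tx chains (and OOO buffers when relevant) and
 * register the per-queue interrupt callbacks. The returned handle indexes
 * p_hwfn->p_ll2_info.
 */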
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

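/* Bring an acquired connection to life: reset the chains and descriptor
 * lists, acquire a CID, compute the producer/doorbell addresses and issue
 * the Rx/Tx queue-start ramrods. FCoE additionally installs ethertype
 * filters (0x8906 FCoE, 0x8914 FIP).
 */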
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

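/* Flush queued Rx descriptors to the firmware: move them to the active
 * list and publish the new BD/CQE producer values to the TSDM producer
 * address in a single 32-bit register write.
 */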
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

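/* Post a single Rx buffer: claim a free descriptor and a BD/CQE pair,
 * fill in the buffer address, and either batch the descriptor on
 * posting_descq or notify the firmware immediately.
 */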
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

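/* Build the first Tx BD from the packet descriptor: vlan, L4 header
 * offset, destination (network vs. loopback), RoCE flavor and the BD
 * flag word. Additional fragment BDs are produced empty here and filled
 * in later as the remaining fragments are set.
 */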
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001563static void
1564qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1565 struct qed_ll2_info *p_ll2,
1566 struct qed_ll2_tx_packet *p_curp,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001567 struct qed_ll2_tx_pkt_info *pkt)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001568{
1569 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1570 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1571 struct core_tx_bd *start_bd = NULL;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001572 enum core_roce_flavor_type roce_flavor;
1573 enum core_tx_dest tx_dest;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001574 u16 bd_data = 0, frag_idx;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001575
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001576 roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
1577 : CORE_RROCE;
1578
1579 tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
1580 : CORE_TX_DEST_LB;
1581
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001582 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001583 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001584 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001585 cpu_to_le16(pkt->l4_hdr_offset_w));
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001586 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001587 bd_data |= pkt->bd_flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001588 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001589 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001590 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1591 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001592 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
1593 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001594
1595 DP_VERBOSE(p_hwfn,
1596 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1597 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1598 p_ll2->queue_id,
1599 p_ll2->cid,
Mintz, Yuval13c54772017-06-09 17:13:20 +03001600 p_ll2->input.conn_type,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001601 prod_idx,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001602 pkt->first_frag_len,
1603 pkt->num_of_bds,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001604 le32_to_cpu(start_bd->addr.hi),
1605 le32_to_cpu(start_bd->addr.lo));
1606
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001607 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001608 return;
1609
1610 /* Need to provide the packet with additional BDs for frags */
1611 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001612 frag_idx < pkt->num_of_bds; frag_idx++) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001613 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1614
1615 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001616 (*p_bd)->bd_data.as_bitfield = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001617 (*p_bd)->bitfield1 = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001618 p_curp->bds_set[frag_idx].tx_frag = 0;
1619 p_curp->bds_set[frag_idx].frag_len = 0;
1620 }
1621}
1622
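/* Move the current packet to sending_descq once all of its BDs are set
 * and, when FW notification was requested, shift the pending packets to
 * active_descq and ring the Txq doorbell with the new BD producer; the
 * wmb() orders the BD updates before the doorbell write.
 */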
 1623/* This must be called while the Txq spinlock is held */
1624static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1625 struct qed_ll2_info *p_ll2_conn)
1626{
1627 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1628 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1629 struct qed_ll2_tx_packet *p_pkt = NULL;
1630 struct core_db_data db_msg = { 0, 0, 0 };
1631 u16 bd_prod;
1632
1633 /* If there are missing BDs, don't do anything now */
1634 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1635 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1636 return;
1637
1638 /* Push the current packet to the list and clean after it */
1639 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1640 &p_ll2_conn->tx_queue.sending_descq);
1641 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1642 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1643
1644 /* Notify FW of packet only if requested to */
1645 if (!b_notify)
1646 return;
1647
1648 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1649
1650 while (!list_empty(&p_tx->sending_descq)) {
1651 p_pkt = list_first_entry(&p_tx->sending_descq,
1652 struct qed_ll2_tx_packet, list_entry);
1653 if (!p_pkt)
1654 break;
1655
Wei Yongjunb4f0fd42016-10-17 15:17:51 +00001656 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001657 }
1658
1659 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1660 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1661 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1662 DQ_XCM_CORE_TX_BD_PROD_CMD);
1663 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1664 db_msg.spq_prod = cpu_to_le16(bd_prod);
1665
1666 /* Make sure the BDs data is updated before ringing the doorbell */
1667 wmb();
1668
1669 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1670
1671 DP_VERBOSE(p_hwfn,
1672 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1673 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1674 p_ll2_conn->queue_id,
Mintz, Yuval13c54772017-06-09 17:13:20 +03001675 p_ll2_conn->cid,
1676 p_ll2_conn->input.conn_type, db_msg.spq_prod);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001677}
1678
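/* Begin transmission of a packet on @connection_handle. Only one packet
 * may be under preparation at a time (-EEXIST otherwise); the caller
 * supplies the first fragment here and the remaining pkt->num_of_bds - 1
 * fragments through qed_ll2_set_fragment_of_tx_packet().
 *
 * A minimal two-fragment sketch; the DMA mappings and the handle are
 * assumed to already exist:
 *
 *	struct qed_ll2_tx_pkt_info pkt;
 *
 *	memset(&pkt, 0, sizeof(pkt));
 *	pkt.num_of_bds = 2;
 *	pkt.tx_dest = QED_LL2_TX_DEST_NW;
 *	pkt.first_frag = frag0_dma;
 *	pkt.first_frag_len = frag0_len;
 *	pkt.cookie = my_cookie;
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
 *	if (!rc)
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frag1_dma, frag1_len);
 */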
Michal Kalderon0518c122017-06-09 17:13:22 +03001679int qed_ll2_prepare_tx_packet(void *cxt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001680 u8 connection_handle,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001681 struct qed_ll2_tx_pkt_info *pkt,
1682 bool notify_fw)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001683{
Michal Kalderon0518c122017-06-09 17:13:22 +03001684 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001685 struct qed_ll2_tx_packet *p_curp = NULL;
1686 struct qed_ll2_info *p_ll2_conn = NULL;
1687 struct qed_ll2_tx_queue *p_tx;
1688 struct qed_chain *p_tx_chain;
1689 unsigned long flags;
1690 int rc = 0;
1691
1692 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1693 if (!p_ll2_conn)
1694 return -EINVAL;
1695 p_tx = &p_ll2_conn->tx_queue;
1696 p_tx_chain = &p_tx->txq_chain;
1697
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001698 if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001699 return -EIO;
1700
1701 spin_lock_irqsave(&p_tx->lock, flags);
1702 if (p_tx->cur_send_packet) {
1703 rc = -EEXIST;
1704 goto out;
1705 }
1706
1707 /* Get entry, but only if we have tx elements for it */
1708 if (!list_empty(&p_tx->free_descq))
1709 p_curp = list_first_entry(&p_tx->free_descq,
1710 struct qed_ll2_tx_packet, list_entry);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001711 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001712 p_curp = NULL;
1713
1714 if (!p_curp) {
1715 rc = -EBUSY;
1716 goto out;
1717 }
1718
1719 /* Prepare packet and BD, and perhaps send a doorbell to FW */
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001720 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
1721
1722 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001723
1724 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1725
1726out:
1727 spin_unlock_irqrestore(&p_tx->lock, flags);
1728 return rc;
1729}
1730
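/* Fill the next pre-produced BD of the packet started by
 * qed_ll2_prepare_tx_packet(); when the last fragment is set,
 * qed_ll2_tx_packet_notify() rings the doorbell.
 */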
Michal Kalderon0518c122017-06-09 17:13:22 +03001731int qed_ll2_set_fragment_of_tx_packet(void *cxt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001732 u8 connection_handle,
1733 dma_addr_t addr, u16 nbytes)
1734{
1735 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
Michal Kalderon0518c122017-06-09 17:13:22 +03001736 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001737 struct qed_ll2_info *p_ll2_conn = NULL;
1738 u16 cur_send_frag_num = 0;
1739 struct core_tx_bd *p_bd;
1740 unsigned long flags;
1741
1742 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1743 if (!p_ll2_conn)
1744 return -EINVAL;
1745
1746 if (!p_ll2_conn->tx_queue.cur_send_packet)
1747 return -EINVAL;
1748
1749 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1750 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1751
1752 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1753 return -EINVAL;
1754
1755 /* Fill the BD information, and possibly notify FW */
1756 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1757 DMA_REGPAIR_LE(p_bd->addr, addr);
1758 p_bd->nbytes = cpu_to_le16(nbytes);
1759 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1760 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1761
1762 p_ll2_conn->tx_queue.cur_send_frag_num++;
1763
1764 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1765 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1766 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1767
1768 return 0;
1769}
1770
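/* Stop Tx/Rx on @connection_handle via ramrods and flush its queues;
 * FCoE ethertype filters and iSCSI OOO isles are cleaned up here as well.
 * The handle itself stays allocated until qed_ll2_release_connection().
 */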
Michal Kalderon0518c122017-06-09 17:13:22 +03001771int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001772{
Michal Kalderon0518c122017-06-09 17:13:22 +03001773 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001774 struct qed_ll2_info *p_ll2_conn = NULL;
1775 int rc = -EINVAL;
Rahul Verma15582962017-04-06 15:58:29 +03001776 struct qed_ptt *p_ptt;
1777
1778 p_ptt = qed_ptt_acquire(p_hwfn);
1779 if (!p_ptt)
1780 return -EAGAIN;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001781
1782 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
Rahul Verma15582962017-04-06 15:58:29 +03001783 if (!p_ll2_conn) {
1784 rc = -EINVAL;
1785 goto out;
1786 }
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001787
1788 /* Stop Tx & Rx of connection, if needed */
1789 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1790 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1791 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001792 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001793 qed_ll2_txq_flush(p_hwfn, connection_handle);
1794 }
1795
1796 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1797 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1798 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001799 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001800 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1801 }
1802
Mintz, Yuval13c54772017-06-09 17:13:20 +03001803 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001804 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1805
Mintz, Yuval13c54772017-06-09 17:13:20 +03001806 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
Rahul Verma15582962017-04-06 15:58:29 +03001807 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001808 0x8906, 0,
1809 QED_LLH_FILTER_ETHERTYPE);
Rahul Verma15582962017-04-06 15:58:29 +03001810 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001811 0x8914, 0,
1812 QED_LLH_FILTER_ETHERTYPE);
1813 }
1814
Rahul Verma15582962017-04-06 15:58:29 +03001815out:
1816 qed_ptt_release(p_hwfn, p_ptt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001817 return rc;
1818}
1819
Mintz, Yuval58de2892017-06-09 17:13:21 +03001820static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
1821 struct qed_ll2_info *p_ll2_conn)
1822{
1823 struct qed_ooo_buffer *p_buffer;
1824
1825 if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
1826 return;
1827
1828 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1829 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
1830 p_hwfn->p_ooo_info))) {
1831 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1832 p_buffer->rx_buffer_size,
1833 p_buffer->rx_buffer_virt_addr,
1834 p_buffer->rx_buffer_phys_addr);
1835 kfree(p_buffer);
1836 }
1837}
Michal Kalderon0518c122017-06-09 17:13:22 +03001838
1839void qed_ll2_release_connection(void *cxt, u8 connection_handle)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001840{
Michal Kalderon0518c122017-06-09 17:13:22 +03001841 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001842 struct qed_ll2_info *p_ll2_conn = NULL;
1843
1844 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1845 if (!p_ll2_conn)
1846 return;
1847
1848 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1849 p_ll2_conn->rx_queue.b_cb_registred = false;
1850 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1851 }
1852
1853 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1854 p_ll2_conn->tx_queue.b_cb_registred = false;
1855 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1856 }
1857
1858 kfree(p_ll2_conn->tx_queue.descq_array);
1859 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1860
1861 kfree(p_ll2_conn->rx_queue.descq_array);
1862 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1863 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1864
1865 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1866
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001867 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1868
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001869 mutex_lock(&p_ll2_conn->mutex);
1870 p_ll2_conn->b_active = false;
1871 mutex_unlock(&p_ll2_conn->mutex);
1872}
1873
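/* Per-hwfn LL2 lifecycle helpers: qed_ll2_alloc() reserves the array of
 * QED_MAX_NUM_OF_LL2_CONNECTIONS connections, qed_ll2_setup() initializes
 * their mutexes, and qed_ll2_free() releases the array.
 */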
Tomer Tayar3587cb82017-05-21 12:10:56 +03001874int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001875{
1876 struct qed_ll2_info *p_ll2_connections;
1877 u8 i;
1878
1879 /* Allocate LL2's set struct */
1880 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1881 sizeof(struct qed_ll2_info), GFP_KERNEL);
1882 if (!p_ll2_connections) {
1883 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
Tomer Tayar3587cb82017-05-21 12:10:56 +03001884 return -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001885 }
1886
1887 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1888 p_ll2_connections[i].my_id = i;
1889
Tomer Tayar3587cb82017-05-21 12:10:56 +03001890 p_hwfn->p_ll2_info = p_ll2_connections;
1891 return 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001892}
1893
Tomer Tayar3587cb82017-05-21 12:10:56 +03001894void qed_ll2_setup(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001895{
1896 int i;
1897
1898 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
Tomer Tayar3587cb82017-05-21 12:10:56 +03001899 mutex_init(&p_hwfn->p_ll2_info[i].mutex);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001900}
1901
Tomer Tayar3587cb82017-05-21 12:10:56 +03001902void qed_ll2_free(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001903{
Tomer Tayar3587cb82017-05-21 12:10:56 +03001904 if (!p_hwfn->p_ll2_info)
1905 return;
1906
1907 kfree(p_hwfn->p_ll2_info);
1908 p_hwfn->p_ll2_info = NULL;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001909}
1910
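/* The stats helpers below copy the per-queue Tstorm/Ustorm/Pstorm counters
 * out of SDM RAM through a PTT window and fold each 64-bit high/low
 * register pair into struct qed_ll2_stats.
 */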
1911static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1912 struct qed_ptt *p_ptt,
1913 struct qed_ll2_info *p_ll2_conn,
1914 struct qed_ll2_stats *p_stats)
1915{
1916 struct core_ll2_tstorm_per_queue_stat tstats;
1917 u8 qid = p_ll2_conn->queue_id;
1918 u32 tstats_addr;
1919
1920 memset(&tstats, 0, sizeof(tstats));
1921 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1922 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1923 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1924
1925 p_stats->packet_too_big_discard =
1926 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1927 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1928}
1929
1930static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1931 struct qed_ptt *p_ptt,
1932 struct qed_ll2_info *p_ll2_conn,
1933 struct qed_ll2_stats *p_stats)
1934{
1935 struct core_ll2_ustorm_per_queue_stat ustats;
1936 u8 qid = p_ll2_conn->queue_id;
1937 u32 ustats_addr;
1938
1939 memset(&ustats, 0, sizeof(ustats));
1940 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1941 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1942 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1943
1944 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1945 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1946 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1947 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1948 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1949 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1950}
1951
1952static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1953 struct qed_ptt *p_ptt,
1954 struct qed_ll2_info *p_ll2_conn,
1955 struct qed_ll2_stats *p_stats)
1956{
1957 struct core_ll2_pstorm_per_queue_stat pstats;
1958 u8 stats_id = p_ll2_conn->tx_stats_id;
1959 u32 pstats_addr;
1960
1961 memset(&pstats, 0, sizeof(pstats));
1962 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1963 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1964 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1965
1966 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1967 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1968 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1969 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1970 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1971 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1972}
1973
Michal Kalderon0518c122017-06-09 17:13:22 +03001974int qed_ll2_get_stats(void *cxt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001975 u8 connection_handle, struct qed_ll2_stats *p_stats)
1976{
Michal Kalderon0518c122017-06-09 17:13:22 +03001977 struct qed_hwfn *p_hwfn = cxt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001978 struct qed_ll2_info *p_ll2_conn = NULL;
1979 struct qed_ptt *p_ptt;
1980
1981 memset(p_stats, 0, sizeof(*p_stats));
1982
1983 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1984 !p_hwfn->p_ll2_info)
1985 return -EINVAL;
1986
1987 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
1988
1989 p_ptt = qed_ptt_acquire(p_hwfn);
1990 if (!p_ptt) {
1991 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1992 return -EINVAL;
1993 }
1994
1995 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1996 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1997 if (p_ll2_conn->tx_stats_en)
1998 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1999
2000 qed_ptt_release(p_hwfn, p_ptt);
2001 return 0;
2002}
2003
Michal Kalderon0518c122017-06-09 17:13:22 +03002004static void qed_ll2b_release_rx_packet(void *cxt,
2005 u8 connection_handle,
2006 void *cookie,
2007 dma_addr_t rx_buf_addr,
2008 bool b_last_packet)
2009{
2010 struct qed_hwfn *p_hwfn = cxt;
2011
2012 qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
2013}
2014
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002015static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2016 const struct qed_ll2_cb_ops *ops,
2017 void *cookie)
2018{
2019 cdev->ll2->cbs = ops;
2020 cdev->ll2->cb_cookie = cookie;
2021}
2022
Michal Kalderon0518c122017-06-09 17:13:22 +03002023struct qed_ll2_cbs ll2_cbs = {
2024 .rx_comp_cb = &qed_ll2b_complete_rx_packet,
2025 .rx_release_cb = &qed_ll2b_release_rx_packet,
2026 .tx_comp_cb = &qed_ll2b_complete_tx_packet,
2027 .tx_release_cb = &qed_ll2b_complete_tx_packet,
2028};
2029
Mintz, Yuval13c54772017-06-09 17:13:20 +03002030static void qed_ll2_set_conn_data(struct qed_dev *cdev,
2031 struct qed_ll2_acquire_data *data,
2032 struct qed_ll2_params *params,
2033 enum qed_ll2_conn_type conn_type,
Michal Kalderon0518c122017-06-09 17:13:22 +03002034 u8 *handle, bool lb)
Mintz, Yuval13c54772017-06-09 17:13:20 +03002035{
2036 memset(data, 0, sizeof(*data));
2037
2038 data->input.conn_type = conn_type;
2039 data->input.mtu = params->mtu;
2040 data->input.rx_num_desc = QED_LL2_RX_SIZE;
2041 data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2042 data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
2043 data->input.tx_num_desc = QED_LL2_TX_SIZE;
Mintz, Yuval13c54772017-06-09 17:13:20 +03002044 data->p_connection_handle = handle;
Michal Kalderon0518c122017-06-09 17:13:22 +03002045 data->cbs = &ll2_cbs;
2046 ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
2047
Mintz, Yuval13c54772017-06-09 17:13:20 +03002048 if (lb) {
2049 data->input.tx_tc = OOO_LB_TC;
2050 data->input.tx_dest = QED_LL2_TX_DEST_LB;
2051 } else {
2052 data->input.tx_tc = 0;
2053 data->input.tx_dest = QED_LL2_TX_DEST_NW;
2054 }
2055}
2056
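/* Bring up the iSCSI out-of-order connection: a QED_LL2_TYPE_ISCSI_OOO
 * connection that is looped back internally on the OOO traffic class
 * rather than sent to the network.
 */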
2057static int qed_ll2_start_ooo(struct qed_dev *cdev,
2058 struct qed_ll2_params *params)
2059{
2060 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2061 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
2062 struct qed_ll2_acquire_data data;
2063 int rc;
2064
2065 qed_ll2_set_conn_data(cdev, &data, params,
Michal Kalderon0518c122017-06-09 17:13:22 +03002066 QED_LL2_TYPE_ISCSI_OOO, handle, true);
Mintz, Yuval13c54772017-06-09 17:13:20 +03002067
2068 rc = qed_ll2_acquire_connection(hwfn, &data);
2069 if (rc) {
2070 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
2071 goto out;
2072 }
2073
2074 rc = qed_ll2_establish_connection(hwfn, *handle);
2075 if (rc) {
 2076		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
2077 goto fail;
2078 }
2079
2080 return 0;
2081
2082fail:
2083 qed_ll2_release_connection(hwfn, *handle);
2084out:
2085 *handle = QED_LL2_UNUSED_HANDLE;
2086 return rc;
2087}
2088
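/* Module-level start of the protocol-driver LL2 channel: allocate and post
 * QED_LL2_RX_SIZE Rx buffers, acquire and establish a connection whose
 * type follows the PF personality (FCoE/iSCSI/RoCE, or QED_LL2_TYPE_TEST
 * otherwise), start the OOO queue when iSCSI OOO is enabled, and install
 * an LLH MAC filter for ll2_mac_address.
 */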
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002089static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2090{
Wei Yongjun88a24282016-10-10 14:08:28 +00002091 struct qed_ll2_buffer *buffer, *tmp_buffer;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002092 enum qed_ll2_conn_type conn_type;
Mintz, Yuval13c54772017-06-09 17:13:20 +03002093 struct qed_ll2_acquire_data data;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002094 struct qed_ptt *p_ptt;
2095 int rc, i;
Michal Kalderon0518c122017-06-09 17:13:22 +03002096
2098 /* Initialize LL2 locks & lists */
2099 INIT_LIST_HEAD(&cdev->ll2->list);
2100 spin_lock_init(&cdev->ll2->lock);
2101 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2102 L1_CACHE_BYTES + params->mtu;
2103 cdev->ll2->frags_mapped = params->frags_mapped;
2104
 2105	/* Allocate memory for LL2 */
2106 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2107 cdev->ll2->rx_size);
2108 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2109 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2110 if (!buffer) {
2111 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2112 goto fail;
2113 }
2114
2115 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2116 &buffer->phys_addr);
2117 if (rc) {
2118 kfree(buffer);
2119 goto fail;
2120 }
2121
2122 list_add_tail(&buffer->list, &cdev->ll2->list);
2123 }
2124
2125 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002126 case QED_PCI_FCOE:
2127 conn_type = QED_LL2_TYPE_FCOE;
Arun Easi1e128c82017-02-15 06:28:22 -08002128 break;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002129 case QED_PCI_ISCSI:
2130 conn_type = QED_LL2_TYPE_ISCSI;
2131 break;
2132 case QED_PCI_ETH_ROCE:
2133 conn_type = QED_LL2_TYPE_ROCE;
2134 break;
2135 default:
2136 conn_type = QED_LL2_TYPE_TEST;
2137 }
2138
Mintz, Yuval13c54772017-06-09 17:13:20 +03002139 qed_ll2_set_conn_data(cdev, &data, params, conn_type,
Michal Kalderon0518c122017-06-09 17:13:22 +03002140 &cdev->ll2->handle, false);
Arnd Bergmann0629a332017-01-18 15:52:52 +01002141
Mintz, Yuval13c54772017-06-09 17:13:20 +03002142 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002143 if (rc) {
2144 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2145 goto fail;
2146 }
2147
2148 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2149 cdev->ll2->handle);
2150 if (rc) {
2151 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2152 goto release_fail;
2153 }
2154
2155 /* Post all Rx buffers to FW */
2156 spin_lock_bh(&cdev->ll2->lock);
Wei Yongjun88a24282016-10-10 14:08:28 +00002157 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002158 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2159 cdev->ll2->handle,
2160 buffer->phys_addr, 0, buffer, 1);
2161 if (rc) {
2162 DP_INFO(cdev,
 2163				"Failed to post an Rx buffer; deleting it\n");
2164 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2165 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2166 kfree(buffer->data);
2167 list_del(&buffer->list);
2168 kfree(buffer);
2169 } else {
2170 cdev->ll2->rx_cnt++;
2171 }
2172 }
2173 spin_unlock_bh(&cdev->ll2->lock);
2174
2175 if (!cdev->ll2->rx_cnt) {
2176 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2177 goto release_terminate;
2178 }
2179
2180 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2181 DP_INFO(cdev, "Invalid Ethernet address\n");
2182 goto release_terminate;
2183 }
2184
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002185 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2186 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2187 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2188 rc = qed_ll2_start_ooo(cdev, params);
2189 if (rc) {
2190 DP_INFO(cdev,
2191 "Failed to initialize the OOO LL2 queue\n");
2192 goto release_terminate;
2193 }
2194 }
2195
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002196 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2197 if (!p_ptt) {
2198 DP_INFO(cdev, "Failed to acquire PTT\n");
2199 goto release_terminate;
2200 }
2201
2202 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2203 params->ll2_mac_address);
2204 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2205 if (rc) {
2206 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2207 goto release_terminate_all;
2208 }
2209
2210 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002211 return 0;
2212
2213release_terminate_all:
2214
2215release_terminate:
2216 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2217release_fail:
2218 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2219fail:
2220 qed_ll2_kill_buffers(cdev);
2221 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2222 return -EINVAL;
2223}
2224
2225static int qed_ll2_stop(struct qed_dev *cdev)
2226{
2227 struct qed_ptt *p_ptt;
2228 int rc;
2229
2230 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2231 return 0;
2232
2233 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2234 if (!p_ptt) {
2235 DP_INFO(cdev, "Failed to acquire PTT\n");
2236 goto fail;
2237 }
2238
2239 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2240 cdev->ll2_mac_address);
2241 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2242 eth_zero_addr(cdev->ll2_mac_address);
2243
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002244 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2245 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2246 qed_ll2_stop_ooo(cdev);
2247
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002248 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2249 cdev->ll2->handle);
2250 if (rc)
2251 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2252
2253 qed_ll2_kill_buffers(cdev);
2254
2255 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2256 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2257
2258 return rc;
2259fail:
2260 return -EINVAL;
2261}
2262
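/* Transmit an skb on the protocol-driver LL2 connection: the linear part
 * is DMA-mapped as the first fragment and each page fragment becomes an
 * extra BD. IP checksum offload is requested unless the packet is
 * IPv6-over-IPv6, and VLAN insertion is flagged when the skb carries a
 * VLAN tag.
 */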
2263static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2264{
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002265 struct qed_ll2_tx_pkt_info pkt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002266 const skb_frag_t *frag;
2267 int rc = -EINVAL, i;
2268 dma_addr_t mapping;
2269 u16 vlan = 0;
2270 u8 flags = 0;
2271
2272 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 2273		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2274 return -EINVAL;
2275 }
2276
2277 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2278 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2279 1 + skb_shinfo(skb)->nr_frags);
2280 return -EINVAL;
2281 }
2282
2283 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2284 skb->len, DMA_TO_DEVICE);
2285 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2286 DP_NOTICE(cdev, "SKB mapping failed\n");
2287 return -EINVAL;
2288 }
2289
2290 /* Request HW to calculate IP csum */
2291 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2292 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002293 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002294
2295 if (skb_vlan_tag_present(skb)) {
2296 vlan = skb_vlan_tag_get(skb);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002297 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002298 }
2299
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002300 memset(&pkt, 0, sizeof(pkt));
2301 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2302 pkt.vlan = vlan;
2303 pkt.bd_flags = flags;
2304 pkt.tx_dest = QED_LL2_TX_DEST_NW;
2305 pkt.first_frag = mapping;
2306 pkt.first_frag_len = skb->len;
2307 pkt.cookie = skb;
2308
2309 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2310 &pkt, 1);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002311 if (rc)
2312 goto err;
2313
2314 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2315 frag = &skb_shinfo(skb)->frags[i];
2316 if (!cdev->ll2->frags_mapped) {
2317 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2318 skb_frag_size(frag),
2319 DMA_TO_DEVICE);
2320
2321 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2322 mapping))) {
2323 DP_NOTICE(cdev,
2324 "Unable to map frag - dropping packet\n");
Pan Bian0ff18d22016-12-04 13:53:53 +08002325 rc = -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002326 goto err;
2327 }
2328 } else {
2329 mapping = page_to_phys(skb_frag_page(frag)) |
2330 frag->page_offset;
2331 }
2332
2333 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2334 cdev->ll2->handle,
2335 mapping,
2336 skb_frag_size(frag));
2337
 2338		/* If this fails there's not much we can do: a partial packet has
 2339		 * been posted and we can't free memory, so wait for completion.
2340 */
2341 if (rc)
2342 goto err2;
2343 }
2344
2345 return 0;
2346
2347err:
2348 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2349
2350err2:
2351 return rc;
2352}
2353
2354static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2355{
2356 if (!cdev->ll2)
2357 return -EINVAL;
2358
2359 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2360 cdev->ll2->handle, stats);
2361}
2362
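/* Callbacks exported to protocol drivers; these are reached through the
 * qed module interface rather than called directly.
 */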
2363const struct qed_ll2_ops qed_ll2_ops_pass = {
2364 .start = &qed_ll2_start,
2365 .stop = &qed_ll2_stop,
2366 .start_xmit = &qed_ll2_start_xmit,
2367 .register_cb_ops = &qed_ll2_register_cb_ops,
2368 .get_stats = &qed_ll2_stats,
2369};
2370
2371int qed_ll2_alloc_if(struct qed_dev *cdev)
2372{
2373 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2374 return cdev->ll2 ? 0 : -ENOMEM;
2375}
2376
2377void qed_ll2_dealloc_if(struct qed_dev *cdev)
2378{
2379 kfree(cdev->ll2);
2380 cdev->ll2 = NULL;
2381}