/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

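/* Tx completion for the cb_ll2 path: release the DMA mapping of the first
 * fragment, notify the registered tx_cb (if any) and free the skb.
 */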
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

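/* Rx completion for the cb_ll2 path: build an skb around the completed
 * buffer, hand it to the registered rx_cb, and repost a replacement (or the
 * reused) buffer to the firmware producer.
 */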
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					struct qed_ll2_comp_rx_data *data)
{
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

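/* Translate a connection handle into its qed_ll2_info entry, optionally
 * taking the connection mutex and/or requiring the connection to be active.
 */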
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->input.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->
							       my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}

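/* Interrupt-context Tx completion: walk the BDs the firmware has consumed,
 * release completed packets to the free list and invoke the GSI/regular
 * completion callback with the queue lock dropped.
 */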
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->input.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->length.packet_length =
		le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, &data);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

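/* Loopback (iSCSI OOO) Rx handler: classify each CQE's out-of-order opcode
 * and update the OOO isle bookkeeping before the buffer is recycled.
 */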
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n"
				  );
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

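/* Drain the OOO 'ready' list: each in-order buffer is resubmitted to the
 * Tx queue as a single-BD packet; on failure it is returned to the list.
 */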
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

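/* Send the CORE_RAMROD_RX_QUEUE_START ramrod describing the Rx BD chain,
 * CQE PBL and error-handling policy for this connection to the firmware.
 */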
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

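/* Reserve a free connection slot, copy the caller's acquisition parameters,
 * allocate the Rx/Tx (and, for iSCSI OOO, the OOO buffer) resources and
 * register the matching completion callbacks; the slot index is returned
 * through data->p_connection_handle.
 */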
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_acquire_data *data)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);
	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

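/* Bring an acquired connection to life: reset the descriptor chains and
 * lists, acquire a CID, program the producer/doorbell addresses and send
 * the Rx/Tx queue-start ramrods (plus FCoE LLH filters where relevant).
 */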
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

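/* Move queued buffers from the posting list to the active list and, if
 * anything changed, publish the new BD/CQE producers to the firmware.
 */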
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

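/* Attach a caller-supplied buffer to a free Rx descriptor and either queue
 * it for a later batched doorbell or notify the firmware immediately.
 */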
1528int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1529 u8 connection_handle,
1530 dma_addr_t addr,
1531 u16 buf_len, void *cookie, u8 notify_fw)
1532{
1533 struct core_rx_bd_with_buff_len *p_curb = NULL;
1534 struct qed_ll2_rx_packet *p_curp = NULL;
1535 struct qed_ll2_info *p_ll2_conn;
1536 struct qed_ll2_rx_queue *p_rx;
1537 unsigned long flags;
1538 void *p_data;
1539 int rc = 0;
1540
1541 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1542 if (!p_ll2_conn)
1543 return -EINVAL;
1544 p_rx = &p_ll2_conn->rx_queue;
1545
1546 spin_lock_irqsave(&p_rx->lock, flags);
1547 if (!list_empty(&p_rx->free_descq))
1548 p_curp = list_first_entry(&p_rx->free_descq,
1549 struct qed_ll2_rx_packet, list_entry);
1550 if (p_curp) {
1551 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1552 qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1553 p_data = qed_chain_produce(&p_rx->rxq_chain);
1554 p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1555 qed_chain_produce(&p_rx->rcq_chain);
1556 }
1557 }
1558
1559 /* If we're lacking entires, let's try to flush buffers to FW */
1560 if (!p_curp || !p_curb) {
1561 rc = -EBUSY;
1562 p_curp = NULL;
1563 goto out_notify;
1564 }
1565
1566 /* We have an Rx packet we can fill */
1567 DMA_REGPAIR_LE(p_curb->addr, addr);
1568 p_curb->buff_length = cpu_to_le16(buf_len);
1569 p_curp->rx_buf_addr = addr;
1570 p_curp->cookie = cookie;
1571 p_curp->rxq_bd = p_curb;
1572 p_curp->buf_length = buf_len;
1573 list_del(&p_curp->list_entry);
1574
1575 /* Check if we only want to enqueue this packet without informing FW */
1576 if (!notify_fw) {
1577 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1578 goto out;
1579 }
1580
1581out_notify:
1582 qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1583out:
1584 spin_unlock_irqrestore(&p_rx->lock, flags);
1585 return rc;
1586}
1587
1588static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1589 struct qed_ll2_tx_queue *p_tx,
1590 struct qed_ll2_tx_packet *p_curp,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001591 struct qed_ll2_tx_pkt_info *pkt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001592 u8 notify_fw)
1593{
1594 list_del(&p_curp->list_entry);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001595 p_curp->cookie = pkt->cookie;
1596 p_curp->bd_used = pkt->num_of_bds;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001597 p_curp->notify_fw = notify_fw;
1598 p_tx->cur_send_packet = p_curp;
1599 p_tx->cur_send_frag_num = 0;
1600
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001601 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
1602 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001603 p_tx->cur_send_frag_num++;
1604}
1605
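/* Second stage of Tx preparation: build the start BD (VLAN, L4 header
 * offset, Tx destination, RoCE flavor and the BD-count/flags bitfield)
 * and produce empty BDs for the remaining fragments; those are filled
 * in later by qed_ll2_set_fragment_of_tx_packet().
 */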
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001606static void
1607qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1608 struct qed_ll2_info *p_ll2,
1609 struct qed_ll2_tx_packet *p_curp,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001610 struct qed_ll2_tx_pkt_info *pkt)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001611{
1612 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1613 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1614 struct core_tx_bd *start_bd = NULL;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001615 enum core_roce_flavor_type roce_flavor;
1616 enum core_tx_dest tx_dest;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001617 u16 bd_data = 0, frag_idx;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001618
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001619 roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
1620 : CORE_RROCE;
1621
1622 tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
1623 : CORE_TX_DEST_LB;
1624
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001625 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001626 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001627 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001628 cpu_to_le16(pkt->l4_hdr_offset_w));
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001629 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001630 bd_data |= pkt->bd_flags;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001631 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001632 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001633 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1634 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001635 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
1636 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001637
1638 DP_VERBOSE(p_hwfn,
1639 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1640 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1641 p_ll2->queue_id,
1642 p_ll2->cid,
Mintz, Yuval13c54772017-06-09 17:13:20 +03001643 p_ll2->input.conn_type,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001644 prod_idx,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001645 pkt->first_frag_len,
1646 pkt->num_of_bds,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001647 le32_to_cpu(start_bd->addr.hi),
1648 le32_to_cpu(start_bd->addr.lo));
1649
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001650 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001651 return;
1652
1653 /* Need to provide the packet with additional BDs for frags */
1654 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001655 frag_idx < pkt->num_of_bds; frag_idx++) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001656 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1657
1658 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001659 (*p_bd)->bd_data.as_bitfield = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001660 (*p_bd)->bitfield1 = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001661 p_curp->bds_set[frag_idx].tx_frag = 0;
1662 p_curp->bds_set[frag_idx].frag_len = 0;
1663 }
1664}
1665
1666/* Must be called while the Tx queue spinlock is held */
1667static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1668 struct qed_ll2_info *p_ll2_conn)
1669{
1670 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1671 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1672 struct qed_ll2_tx_packet *p_pkt = NULL;
1673 struct core_db_data db_msg = { 0, 0, 0 };
1674 u16 bd_prod;
1675
1676 /* If there are missing BDs, don't do anything now */
1677 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1678 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1679 return;
1680
1681	/* Push the current packet to the sending list and reset send state */
1682 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1683 &p_ll2_conn->tx_queue.sending_descq);
1684 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1685 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1686
1687 /* Notify FW of packet only if requested to */
1688 if (!b_notify)
1689 return;
1690
1691 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1692
1693 while (!list_empty(&p_tx->sending_descq)) {
1694 p_pkt = list_first_entry(&p_tx->sending_descq,
1695 struct qed_ll2_tx_packet, list_entry);
1696 if (!p_pkt)
1697 break;
1698
Wei Yongjunb4f0fd42016-10-17 15:17:51 +00001699 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001700 }
1701
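	/* Build the doorbell payload: destination XCM, an aggregation
	 * command that sets the Tx BD producer, and the new producer
	 * value itself.
	 */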
1702 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1703 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1704 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1705 DQ_XCM_CORE_TX_BD_PROD_CMD);
1706 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1707 db_msg.spq_prod = cpu_to_le16(bd_prod);
1708
1709	/* Make sure the BD data is written before ringing the doorbell */
1710 wmb();
1711
1712 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1713
1714 DP_VERBOSE(p_hwfn,
1715 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1716 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1717 p_ll2_conn->queue_id,
Mintz, Yuval13c54772017-06-09 17:13:20 +03001718 p_ll2_conn->cid,
1719 p_ll2_conn->input.conn_type, db_msg.spq_prod);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001720}
1721
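/* Queue a Tx packet on the connection. Only the first fragment is
 * supplied here; for multi-BD packets the caller must then invoke
 * qed_ll2_set_fragment_of_tx_packet() once per remaining fragment, and
 * the doorbell is rung only when the last expected fragment arrives.
 * A minimal caller sketch (placeholder names; qed_ll2_start_xmit()
 * below is the real in-tree user):
 *
 *	struct qed_ll2_tx_pkt_info pkt = { 0 };
 *
 *	pkt.num_of_bds = 1 + nr_frags;
 *	pkt.first_frag = first_mapping;
 *	pkt.first_frag_len = first_len;
 *	pkt.cookie = ctx;
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
 *	for each remaining fragment:
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frag_mapping,
 *						       frag_len);
 */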
1722int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1723 u8 connection_handle,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001724 struct qed_ll2_tx_pkt_info *pkt,
1725 bool notify_fw)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001726{
1727 struct qed_ll2_tx_packet *p_curp = NULL;
1728 struct qed_ll2_info *p_ll2_conn = NULL;
1729 struct qed_ll2_tx_queue *p_tx;
1730 struct qed_chain *p_tx_chain;
1731 unsigned long flags;
1732 int rc = 0;
1733
1734 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1735 if (!p_ll2_conn)
1736 return -EINVAL;
1737 p_tx = &p_ll2_conn->tx_queue;
1738 p_tx_chain = &p_tx->txq_chain;
1739
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001740 if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001741 return -EIO;
1742
1743 spin_lock_irqsave(&p_tx->lock, flags);
1744 if (p_tx->cur_send_packet) {
1745 rc = -EEXIST;
1746 goto out;
1747 }
1748
1749 /* Get entry, but only if we have tx elements for it */
1750 if (!list_empty(&p_tx->free_descq))
1751 p_curp = list_first_entry(&p_tx->free_descq,
1752 struct qed_ll2_tx_packet, list_entry);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001753 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001754 p_curp = NULL;
1755
1756 if (!p_curp) {
1757 rc = -EBUSY;
1758 goto out;
1759 }
1760
1761 /* Prepare packet and BD, and perhaps send a doorbell to FW */
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001762 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
1763
1764 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001765
1766 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1767
1768out:
1769 spin_unlock_irqrestore(&p_tx->lock, flags);
1770 return rc;
1771}
1772
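/* Attach the next fragment to the Tx packet started by
 * qed_ll2_prepare_tx_packet(). Once cur_send_frag_num reaches the
 * packet's bd_used count, qed_ll2_tx_packet_notify() rings the doorbell
 * (if the packet was prepared with notify_fw set).
 */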
1773int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1774 u8 connection_handle,
1775 dma_addr_t addr, u16 nbytes)
1776{
1777 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1778 struct qed_ll2_info *p_ll2_conn = NULL;
1779 u16 cur_send_frag_num = 0;
1780 struct core_tx_bd *p_bd;
1781 unsigned long flags;
1782
1783 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1784 if (!p_ll2_conn)
1785 return -EINVAL;
1786
1787 if (!p_ll2_conn->tx_queue.cur_send_packet)
1788 return -EINVAL;
1789
1790 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1791 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1792
1793 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1794 return -EINVAL;
1795
1796 /* Fill the BD information, and possibly notify FW */
1797 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1798 DMA_REGPAIR_LE(p_bd->addr, addr);
1799 p_bd->nbytes = cpu_to_le16(nbytes);
1800 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1801 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1802
1803 p_ll2_conn->tx_queue.cur_send_frag_num++;
1804
1805 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1806 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1807 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1808
1809 return 0;
1810}
1811
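/* Tear down an established connection: stop the Tx/Rx queues via
 * ramrods and flush any buffers still queued. For the iSCSI OOO
 * connection the OOO isles are released, and for FCoE the FCoE/FIP
 * ethertype filters (0x8906/0x8914) are removed. The handle itself
 * stays allocated until qed_ll2_release_connection().
 */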
1812int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1813{
1814 struct qed_ll2_info *p_ll2_conn = NULL;
1815 int rc = -EINVAL;
Rahul Verma15582962017-04-06 15:58:29 +03001816 struct qed_ptt *p_ptt;
1817
1818 p_ptt = qed_ptt_acquire(p_hwfn);
1819 if (!p_ptt)
1820 return -EAGAIN;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001821
1822 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
Rahul Verma15582962017-04-06 15:58:29 +03001823 if (!p_ll2_conn) {
1824 rc = -EINVAL;
1825 goto out;
1826 }
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001827
1828 /* Stop Tx & Rx of connection, if needed */
1829 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1830 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1831 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001832 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001833 qed_ll2_txq_flush(p_hwfn, connection_handle);
1834 }
1835
1836 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1837 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1838 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001839 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001840 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1841 }
1842
Mintz, Yuval13c54772017-06-09 17:13:20 +03001843 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001844 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1845
Mintz, Yuval13c54772017-06-09 17:13:20 +03001846 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
Rahul Verma15582962017-04-06 15:58:29 +03001847 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001848 0x8906, 0,
1849 QED_LLH_FILTER_ETHERTYPE);
Rahul Verma15582962017-04-06 15:58:29 +03001850 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001851 0x8914, 0,
1852 QED_LLH_FILTER_ETHERTYPE);
1853 }
1854
Rahul Verma15582962017-04-06 15:58:29 +03001855out:
1856 qed_ptt_release(p_hwfn, p_ptt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001857 return rc;
1858}
1859
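/* Free everything held by the connection: unregister the status-block
 * callbacks, free the descriptor arrays and BD/completion chains,
 * release the CID and mark the handle inactive so it can be reused.
 */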
1860void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1861{
1862 struct qed_ll2_info *p_ll2_conn = NULL;
1863
1864 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1865 if (!p_ll2_conn)
1866 return;
1867
1868 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1869 p_ll2_conn->rx_queue.b_cb_registred = false;
1870 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1871 }
1872
1873 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1874 p_ll2_conn->tx_queue.b_cb_registred = false;
1875 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1876 }
1877
1878 kfree(p_ll2_conn->tx_queue.descq_array);
1879 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1880
1881 kfree(p_ll2_conn->rx_queue.descq_array);
1882 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1883 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1884
1885 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1886
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001887 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1888
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001889 mutex_lock(&p_ll2_conn->mutex);
1890 p_ll2_conn->b_active = false;
1891 mutex_unlock(&p_ll2_conn->mutex);
1892}
1893
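/* Allocate the per-hwfn array of LL2 connection contexts; individual
 * entries are claimed later through qed_ll2_acquire_connection().
 */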
Tomer Tayar3587cb82017-05-21 12:10:56 +03001894int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001895{
1896 struct qed_ll2_info *p_ll2_connections;
1897 u8 i;
1898
1899 /* Allocate LL2's set struct */
1900 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1901 sizeof(struct qed_ll2_info), GFP_KERNEL);
1902 if (!p_ll2_connections) {
1903 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
Tomer Tayar3587cb82017-05-21 12:10:56 +03001904 return -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001905 }
1906
1907 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1908 p_ll2_connections[i].my_id = i;
1909
Tomer Tayar3587cb82017-05-21 12:10:56 +03001910 p_hwfn->p_ll2_info = p_ll2_connections;
1911 return 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001912}
1913
Tomer Tayar3587cb82017-05-21 12:10:56 +03001914void qed_ll2_setup(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001915{
1916 int i;
1917
1918 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
Tomer Tayar3587cb82017-05-21 12:10:56 +03001919 mutex_init(&p_hwfn->p_ll2_info[i].mutex);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001920}
1921
Tomer Tayar3587cb82017-05-21 12:10:56 +03001922void qed_ll2_free(struct qed_hwfn *p_hwfn)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001923{
Tomer Tayar3587cb82017-05-21 12:10:56 +03001924 if (!p_hwfn->p_ll2_info)
1925 return;
1926
1927 kfree(p_hwfn->p_ll2_info);
1928 p_hwfn->p_ll2_info = NULL;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001929}
1930
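/* The helpers below read the per-queue counters that the Tstorm,
 * Ustorm and Pstorm firmware engines keep in their SDM RAM, folding
 * each hi/lo register pair into a single 64-bit value.
 */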
1931static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1932 struct qed_ptt *p_ptt,
1933 struct qed_ll2_info *p_ll2_conn,
1934 struct qed_ll2_stats *p_stats)
1935{
1936 struct core_ll2_tstorm_per_queue_stat tstats;
1937 u8 qid = p_ll2_conn->queue_id;
1938 u32 tstats_addr;
1939
1940 memset(&tstats, 0, sizeof(tstats));
1941 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1942 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1943 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1944
1945 p_stats->packet_too_big_discard =
1946 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1947 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1948}
1949
1950static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1951 struct qed_ptt *p_ptt,
1952 struct qed_ll2_info *p_ll2_conn,
1953 struct qed_ll2_stats *p_stats)
1954{
1955 struct core_ll2_ustorm_per_queue_stat ustats;
1956 u8 qid = p_ll2_conn->queue_id;
1957 u32 ustats_addr;
1958
1959 memset(&ustats, 0, sizeof(ustats));
1960 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1961 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1962 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1963
1964 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1965 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1966 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1967 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1968 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1969 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1970}
1971
1972static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1973 struct qed_ptt *p_ptt,
1974 struct qed_ll2_info *p_ll2_conn,
1975 struct qed_ll2_stats *p_stats)
1976{
1977 struct core_ll2_pstorm_per_queue_stat pstats;
1978 u8 stats_id = p_ll2_conn->tx_stats_id;
1979 u32 pstats_addr;
1980
1981 memset(&pstats, 0, sizeof(pstats));
1982 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1983 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1984 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1985
1986 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1987 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1988 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1989 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1990 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1991 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1992}
1993
1994int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1995 u8 connection_handle, struct qed_ll2_stats *p_stats)
1996{
1997 struct qed_ll2_info *p_ll2_conn = NULL;
1998 struct qed_ptt *p_ptt;
1999
2000 memset(p_stats, 0, sizeof(*p_stats));
2001
2002 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
2003 !p_hwfn->p_ll2_info)
2004 return -EINVAL;
2005
2006 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2007
2008 p_ptt = qed_ptt_acquire(p_hwfn);
2009 if (!p_ptt) {
2010 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2011 return -EINVAL;
2012 }
2013
2014 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2015 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2016 if (p_ll2_conn->tx_stats_en)
2017 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2018
2019 qed_ptt_release(p_hwfn, p_ptt);
2020 return 0;
2021}
2022
2023static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2024 const struct qed_ll2_cb_ops *ops,
2025 void *cookie)
2026{
2027 cdev->ll2->cbs = ops;
2028 cdev->ll2->cb_cookie = cookie;
2029}
2030
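/* Translate the generic qed_ll2_params supplied by the upper-layer
 * driver into an acquire request. The loopback (lb) flavor is used for
 * the iSCSI OOO queue: it sends on the dedicated OOO traffic class
 * towards the loopback engine rather than the network.
 */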
Mintz, Yuval13c54772017-06-09 17:13:20 +03002031static void qed_ll2_set_conn_data(struct qed_dev *cdev,
2032 struct qed_ll2_acquire_data *data,
2033 struct qed_ll2_params *params,
2034 enum qed_ll2_conn_type conn_type,
2035 u8 *handle, bool lb, u8 gsi_enable)
2036{
2037 memset(data, 0, sizeof(*data));
2038
2039 data->input.conn_type = conn_type;
2040 data->input.mtu = params->mtu;
2041 data->input.rx_num_desc = QED_LL2_RX_SIZE;
2042 data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2043 data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
2044 data->input.tx_num_desc = QED_LL2_TX_SIZE;
2045 data->input.gsi_enable = gsi_enable;
2046 data->p_connection_handle = handle;
2047 if (lb) {
2048 data->input.tx_tc = OOO_LB_TC;
2049 data->input.tx_dest = QED_LL2_TX_DEST_LB;
2050 } else {
2051 data->input.tx_tc = 0;
2052 data->input.tx_dest = QED_LL2_TX_DEST_NW;
2053 }
2054}
2055
2056static int qed_ll2_start_ooo(struct qed_dev *cdev,
2057 struct qed_ll2_params *params)
2058{
2059 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2060 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
2061 struct qed_ll2_acquire_data data;
2062 int rc;
2063
2064 qed_ll2_set_conn_data(cdev, &data, params,
2065 QED_LL2_TYPE_ISCSI_OOO, handle, true, 0);
2066
2067 rc = qed_ll2_acquire_connection(hwfn, &data);
2068 if (rc) {
2069 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
2070 goto out;
2071 }
2072
2073 rc = qed_ll2_establish_connection(hwfn, *handle);
2074 if (rc) {
2075		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
2076 goto fail;
2077 }
2078
2079 return 0;
2080
2081fail:
2082 qed_ll2_release_connection(hwfn, *handle);
2083out:
2084 *handle = QED_LL2_UNUSED_HANDLE;
2085 return rc;
2086}
2087
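/* qed_ll2_ops entry point: bring up the default LL2 connection for the
 * PF personality, pre-post QED_LL2_RX_SIZE receive buffers and install
 * the MAC filter requested by the protocol driver.
 */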
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002088static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2089{
Wei Yongjun88a24282016-10-10 14:08:28 +00002090 struct qed_ll2_buffer *buffer, *tmp_buffer;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002091 enum qed_ll2_conn_type conn_type;
Mintz, Yuval13c54772017-06-09 17:13:20 +03002092 struct qed_ll2_acquire_data data;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002093 struct qed_ptt *p_ptt;
2094 int rc, i;
Yuval Mintzfc831822016-12-01 00:21:06 -08002095 u8 gsi_enable = 1;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002096
2097 /* Initialize LL2 locks & lists */
2098 INIT_LIST_HEAD(&cdev->ll2->list);
2099 spin_lock_init(&cdev->ll2->lock);
2100 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2101 L1_CACHE_BYTES + params->mtu;
2102 cdev->ll2->frags_mapped = params->frags_mapped;
2103
2104	/* Allocate memory for LL2 */
2105 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2106 cdev->ll2->rx_size);
2107 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2108 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2109 if (!buffer) {
2110 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2111 goto fail;
2112 }
2113
2114 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2115 &buffer->phys_addr);
2116 if (rc) {
2117 kfree(buffer);
2118 goto fail;
2119 }
2120
2121 list_add_tail(&buffer->list, &cdev->ll2->list);
2122 }
2123
2124 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002125 case QED_PCI_FCOE:
2126 conn_type = QED_LL2_TYPE_FCOE;
2127 gsi_enable = 0;
2128 break;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002129 case QED_PCI_ISCSI:
2130 conn_type = QED_LL2_TYPE_ISCSI;
Yuval Mintzfc831822016-12-01 00:21:06 -08002131 gsi_enable = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002132 break;
2133 case QED_PCI_ETH_ROCE:
2134 conn_type = QED_LL2_TYPE_ROCE;
2135 break;
2136 default:
2137 conn_type = QED_LL2_TYPE_TEST;
2138 }
2139
Mintz, Yuval13c54772017-06-09 17:13:20 +03002140 qed_ll2_set_conn_data(cdev, &data, params, conn_type,
2141 &cdev->ll2->handle, false, gsi_enable);
Arnd Bergmann0629a332017-01-18 15:52:52 +01002142
Mintz, Yuval13c54772017-06-09 17:13:20 +03002143 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002144 if (rc) {
2145 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2146 goto fail;
2147 }
2148
2149 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2150 cdev->ll2->handle);
2151 if (rc) {
2152 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2153 goto release_fail;
2154 }
2155
2156 /* Post all Rx buffers to FW */
2157 spin_lock_bh(&cdev->ll2->lock);
Wei Yongjun88a24282016-10-10 14:08:28 +00002158 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002159 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2160 cdev->ll2->handle,
2161 buffer->phys_addr, 0, buffer, 1);
2162 if (rc) {
2163 DP_INFO(cdev,
2164 "Failed to post an Rx buffer; Deleting it\n");
2165 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2166 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2167 kfree(buffer->data);
2168 list_del(&buffer->list);
2169 kfree(buffer);
2170 } else {
2171 cdev->ll2->rx_cnt++;
2172 }
2173 }
2174 spin_unlock_bh(&cdev->ll2->lock);
2175
2176 if (!cdev->ll2->rx_cnt) {
2177 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2178 goto release_terminate;
2179 }
2180
2181 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2182 DP_INFO(cdev, "Invalid Ethernet address\n");
2183 goto release_terminate;
2184 }
2185
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002186 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2187 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2188 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2189 rc = qed_ll2_start_ooo(cdev, params);
2190 if (rc) {
2191 DP_INFO(cdev,
2192 "Failed to initialize the OOO LL2 queue\n");
2193 goto release_terminate;
2194 }
2195 }
2196
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002197 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2198 if (!p_ptt) {
2199 DP_INFO(cdev, "Failed to acquire PTT\n");
2200 goto release_terminate;
2201 }
2202
2203 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2204 params->ll2_mac_address);
2205 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2206 if (rc) {
2207 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2208 goto release_terminate_all;
2209 }
2210
2211 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002212 return 0;
2213
2214release_terminate_all:
2215
2216release_terminate:
2217 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2218release_fail:
2219 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2220fail:
2221 qed_ll2_kill_buffers(cdev);
2222 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2223 return -EINVAL;
2224}
2225
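/* Inverse of qed_ll2_start(): remove the MAC filter, stop the OOO
 * queue if one was started, terminate and release the default
 * connection, and reclaim the Rx buffers.
 */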
2226static int qed_ll2_stop(struct qed_dev *cdev)
2227{
2228 struct qed_ptt *p_ptt;
2229 int rc;
2230
2231 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2232 return 0;
2233
2234 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2235 if (!p_ptt) {
2236 DP_INFO(cdev, "Failed to acquire PTT\n");
2237 goto fail;
2238 }
2239
2240 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2241 cdev->ll2_mac_address);
2242 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2243 eth_zero_addr(cdev->ll2_mac_address);
2244
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002245 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2246 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2247 qed_ll2_stop_ooo(cdev);
2248
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002249 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2250 cdev->ll2->handle);
2251 if (rc)
2252 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2253
2254 qed_ll2_kill_buffers(cdev);
2255
2256 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2257 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2258
2259 return rc;
2260fail:
2261 return -EINVAL;
2262}
2263
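/* Transmit a single skb over the default LL2 connection. The linear
 * part becomes the first BD; each page fragment is DMA-mapped (unless
 * the upper driver pre-mapped them, i.e. frags_mapped) and appended via
 * qed_ll2_set_fragment_of_tx_packet().
 */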
2264static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2265{
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002266 struct qed_ll2_tx_pkt_info pkt;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002267 const skb_frag_t *frag;
2268 int rc = -EINVAL, i;
2269 dma_addr_t mapping;
2270 u16 vlan = 0;
2271 u8 flags = 0;
2272
2273 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2274		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2275 return -EINVAL;
2276 }
2277
2278 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2279 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2280 1 + skb_shinfo(skb)->nr_frags);
2281 return -EINVAL;
2282 }
2283
2284 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2285 skb->len, DMA_TO_DEVICE);
2286 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2287 DP_NOTICE(cdev, "SKB mapping failed\n");
2288 return -EINVAL;
2289 }
2290
2291 /* Request HW to calculate IP csum */
2292 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2293 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002294 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002295
2296 if (skb_vlan_tag_present(skb)) {
2297 vlan = skb_vlan_tag_get(skb);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002298 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002299 }
2300
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03002301 memset(&pkt, 0, sizeof(pkt));
2302 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2303 pkt.vlan = vlan;
2304 pkt.bd_flags = flags;
2305 pkt.tx_dest = QED_LL2_TX_DEST_NW;
2306 pkt.first_frag = mapping;
2307 pkt.first_frag_len = skb->len;
2308 pkt.cookie = skb;
2309
2310 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2311 &pkt, 1);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002312 if (rc)
2313 goto err;
2314
2315 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2316 frag = &skb_shinfo(skb)->frags[i];
2317 if (!cdev->ll2->frags_mapped) {
2318 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2319 skb_frag_size(frag),
2320 DMA_TO_DEVICE);
2321
2322 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2323 mapping))) {
2324 DP_NOTICE(cdev,
2325 "Unable to map frag - dropping packet\n");
Pan Bian0ff18d22016-12-04 13:53:53 +08002326 rc = -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002327 goto err;
2328 }
2329 } else {
2330 mapping = page_to_phys(skb_frag_page(frag)) |
2331 frag->page_offset;
2332 }
2333
2334 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2335 cdev->ll2->handle,
2336 mapping,
2337 skb_frag_size(frag));
2338
2339		/* If this fails there's not much to do: a partial packet has
2340		 * been posted and we can't free its memory until completion.
2341		 */
2342 if (rc)
2343 goto err2;
2344 }
2345
2346 return 0;
2347
2348err:
2349 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2350
2351err2:
2352 return rc;
2353}
2354
2355static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2356{
2357 if (!cdev->ll2)
2358 return -EINVAL;
2359
2360 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2361 cdev->ll2->handle, stats);
2362}
2363
2364const struct qed_ll2_ops qed_ll2_ops_pass = {
2365 .start = &qed_ll2_start,
2366 .stop = &qed_ll2_stop,
2367 .start_xmit = &qed_ll2_start_xmit,
2368 .register_cb_ops = &qed_ll2_register_cb_ops,
2369 .get_stats = &qed_ll2_stats,
2370};
2371
2372int qed_ll2_alloc_if(struct qed_dev *cdev)
2373{
2374 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2375 return cdev->ll2 ? 0 : -ENOMEM;
2376}
2377
2378void qed_ll2_dealloc_if(struct qed_dev *cdev)
2379{
2380 kfree(cdev->ll2);
2381 cdev->ll2 = NULL;
2382}