/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

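/* Tx completion for the protocol-driver ("cb") LL2 path: unmap the first
 * fragment, hand the skb to the registered tx_cb (if any) and free it.
 */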
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

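/* Allocate an Rx buffer and DMA-map it for the device; the mapping starts
 * NET_SKB_PAD past the allocation so the headroom is preserved for a later
 * build_skb() in the Rx completion path.
 */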
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If we need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

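/* Map a connection handle to its qed_ll2_info. The handle is an index into
 * the per-hwfn p_ll2_info array; callers can restrict the lookup to active
 * connections and/or take the connection mutex around the check.
 */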
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

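/* Drain every packet still on the Tx active list of an inactive connection,
 * returning each one to its owner (OOO buffer pool, GSI or regular Tx
 * completion) without touching the hardware.
 */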
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->conn.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}

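/* Interrupt-context Tx completion: walk the firmware consumer index forward,
 * consume the BDs of each completed packet and run the matching (GSI or
 * regular) completion callback with the queue spinlock dropped.
 */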
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->conn.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

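/* Complete a single GSI (RoCE QP1) Rx CQE: pop the matching descriptor,
 * extract the CQE fields and deliver them upward with the Rx lock dropped.
 */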
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long *p_lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

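/* Interrupt-context Rx completion: consume CQEs up to the firmware consumer
 * index and dispatch each one by type (slowpath, GSI offload or regular).
 */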
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, &flags,
							b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

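/* iSCSI out-of-order (OOO) support: a loopback LL2 connection through which
 * the firmware hands out-of-order TCP segments to the driver, which buffers
 * them in "isles" until they can be replayed in order.
 */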
#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

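/* Loopback Rx handler for the OOO connection: each CQE carries an OOO opcode
 * in its opaque data telling us whether to drop, create, extend or join
 * isles of buffered out-of-order data for a given connection ID.
 */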
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
					       p_buffer->vlan, bd_flags,
					       l4_hdr_offset_w,
					       p_ll2_conn->conn.tx_dest, 0,
					       first_frag,
					       p_buffer->packet_length,
					       p_buffer, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

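/* Pre-allocate the DMA-coherent buffer pool used by the OOO connection; each
 * buffer is sized for the MTU plus header room and rounded up to a whole
 * number of cache lines.
 */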
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_conn ll2_info = { 0 };
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = OOO_LB_TC;
	ll2_info.tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
				    { return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */

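/* Slowpath ramrods: Rx/Tx queue start and stop commands posted to the
 * firmware through the slowpath queue (SPQ).
 */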
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->conn.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}

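/* Reserve a free connection slot, allocate its Rx/Tx chains and descriptor
 * arrays (plus the OOO buffer pool when applicable), and register the proper
 * completion callbacks with the status-block infrastructure.
 */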
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn = *p_params;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->conn.ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
			      GTT_BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
			      qed_db_addr(p_ll2_conn->cid,
					  DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

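/* Move buffers queued for posting onto the active list and, when there is
 * anything to report, write the updated BD/CQE producer values to the
 * TSTORM Rx producers register.
 */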
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  u8 num_of_bds,
					  dma_addr_t first_frag,
					  u16 first_frag_len, void *p_cookie,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = p_cookie;
	p_curp->bd_used = num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
	p_tx->cur_send_frag_num++;
}

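/* Fill the first Tx BD (VLAN, flags, fragment count, RoCE flavor) and reserve
 * zeroed BDs for any additional fragments still to be provided.
 */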
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001614static void
1615qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1616 struct qed_ll2_info *p_ll2,
1617 struct qed_ll2_tx_packet *p_curp,
1618 u8 num_of_bds,
1619 enum core_tx_dest tx_dest,
1620 u16 vlan,
1621 u8 bd_flags,
1622 u16 l4_hdr_offset_w,
1623 enum core_roce_flavor_type roce_flavor,
1624 dma_addr_t first_frag,
1625 u16 first_frag_len)
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001626{
1627 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1628 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1629 struct core_tx_bd *start_bd = NULL;
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001630 u16 bd_data = 0, frag_idx;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001631
1632 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1633 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1634 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1635 cpu_to_le16(l4_hdr_offset_w));
1636 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001637 bd_data |= bd_flags;
1638 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
1639 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
1640 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
1641 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001642 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1643 start_bd->nbytes = cpu_to_le16(first_frag_len);
1644
1645 DP_VERBOSE(p_hwfn,
1646 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1647 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1648 p_ll2->queue_id,
1649 p_ll2->cid,
Arnd Bergmann0629a332017-01-18 15:52:52 +01001650 p_ll2->conn.conn_type,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001651 prod_idx,
1652 first_frag_len,
1653 num_of_bds,
1654 le32_to_cpu(start_bd->addr.hi),
1655 le32_to_cpu(start_bd->addr.lo));
1656
1657 if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1658 return;
1659
1660 /* Need to provide the packet with additional BDs for frags */
1661 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1662 frag_idx < num_of_bds; frag_idx++) {
1663 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1664
1665 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02001666 (*p_bd)->bd_data.as_bitfield = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001667 (*p_bd)->bitfield1 = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001668 p_curp->bds_set[frag_idx].tx_frag = 0;
1669 p_curp->bds_set[frag_idx].frag_len = 0;
1670 }
1671}
1672
1673/* This should be called while the Txq spinlock is being held */
1674static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1675 struct qed_ll2_info *p_ll2_conn)
1676{
1677 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1678 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1679 struct qed_ll2_tx_packet *p_pkt = NULL;
1680 struct core_db_data db_msg = { 0, 0, 0 };
1681 u16 bd_prod;
1682
1683 /* If there are missing BDs, don't do anything now */
1684 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1685 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1686 return;
1687
 1688	/* Push the current packet to the sending list and reset the in-progress state */
1689 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1690 &p_ll2_conn->tx_queue.sending_descq);
1691 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1692 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1693
1694 /* Notify FW of packet only if requested to */
1695 if (!b_notify)
1696 return;
1697
1698 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1699
1700 while (!list_empty(&p_tx->sending_descq)) {
1701 p_pkt = list_first_entry(&p_tx->sending_descq,
1702 struct qed_ll2_tx_packet, list_entry);
1703 if (!p_pkt)
1704 break;
1705
Wei Yongjunb4f0fd42016-10-17 15:17:51 +00001706 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001707 }
1708
1709 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1710 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1711 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1712 DQ_XCM_CORE_TX_BD_PROD_CMD);
1713 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1714 db_msg.spq_prod = cpu_to_le16(bd_prod);
1715
 1716	/* Make sure the BD data is updated before ringing the doorbell */
1717 wmb();
1718
1719 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1720
1721 DP_VERBOSE(p_hwfn,
1722 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1723 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1724 p_ll2_conn->queue_id,
Arnd Bergmann0629a332017-01-18 15:52:52 +01001725 p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001726}
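
/*
 * Callers must hold the Tx queue spinlock around the notify; this is the
 * pattern used by qed_ll2_prepare_tx_packet() and
 * qed_ll2_set_fragment_of_tx_packet() below. A minimal sketch:
 *
 *	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
 *	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
 *	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
 */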
1727
1728int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1729 u8 connection_handle,
1730 u8 num_of_bds,
1731 u16 vlan,
1732 u8 bd_flags,
1733 u16 l4_hdr_offset_w,
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001734 enum qed_ll2_tx_dest e_tx_dest,
Ram Amraniabd49672016-10-01 22:00:01 +03001735 enum qed_ll2_roce_flavor_type qed_roce_flavor,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001736 dma_addr_t first_frag,
1737 u16 first_frag_len, void *cookie, u8 notify_fw)
1738{
1739 struct qed_ll2_tx_packet *p_curp = NULL;
1740 struct qed_ll2_info *p_ll2_conn = NULL;
Ram Amraniabd49672016-10-01 22:00:01 +03001741 enum core_roce_flavor_type roce_flavor;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001742 struct qed_ll2_tx_queue *p_tx;
1743 struct qed_chain *p_tx_chain;
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001744 enum core_tx_dest tx_dest;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001745 unsigned long flags;
1746 int rc = 0;
1747
1748 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1749 if (!p_ll2_conn)
1750 return -EINVAL;
1751 p_tx = &p_ll2_conn->tx_queue;
1752 p_tx_chain = &p_tx->txq_chain;
1753
1754 if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1755 return -EIO;
1756
1757 spin_lock_irqsave(&p_tx->lock, flags);
1758 if (p_tx->cur_send_packet) {
1759 rc = -EEXIST;
1760 goto out;
1761 }
1762
1763 /* Get entry, but only if we have tx elements for it */
1764 if (!list_empty(&p_tx->free_descq))
1765 p_curp = list_first_entry(&p_tx->free_descq,
1766 struct qed_ll2_tx_packet, list_entry);
1767 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1768 p_curp = NULL;
1769
1770 if (!p_curp) {
1771 rc = -EBUSY;
1772 goto out;
1773 }
1774
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001775 tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
1776 CORE_TX_DEST_LB;
Ram Amraniabd49672016-10-01 22:00:01 +03001777 if (qed_roce_flavor == QED_LL2_ROCE) {
1778 roce_flavor = CORE_ROCE;
1779 } else if (qed_roce_flavor == QED_LL2_RROCE) {
1780 roce_flavor = CORE_RROCE;
1781 } else {
1782 rc = -EINVAL;
1783 goto out;
1784 }
1785
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001786 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1787 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1788 num_of_bds, first_frag,
1789 first_frag_len, cookie, notify_fw);
1790 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001791 num_of_bds, tx_dest,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001792 vlan, bd_flags, l4_hdr_offset_w,
Ram Amraniabd49672016-10-01 22:00:01 +03001793 roce_flavor,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001794 first_frag, first_frag_len);
1795
1796 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1797
1798out:
1799 spin_unlock_irqrestore(&p_tx->lock, flags);
1800 return rc;
1801}
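
/*
 * Minimal usage sketch (illustrative only, not called by the driver):
 * transmit a single pre-mapped fragment towards the network, using a
 * handle obtained earlier from qed_ll2_acquire_connection().
 */
static int __maybe_unused
qed_ll2_example_tx_one_frag(struct qed_hwfn *p_hwfn, u8 handle,
			    dma_addr_t frag, u16 frag_len, void *cookie)
{
	/* One BD, no VLAN, no BD flags, no L4 header offset; ask the FW
	 * for a Tx completion notification.
	 */
	return qed_ll2_prepare_tx_packet(p_hwfn, handle, 1 /* num_of_bds */,
					 0 /* vlan */, 0 /* bd_flags */,
					 0 /* l4_hdr_offset_w */,
					 QED_LL2_TX_DEST_NW, QED_LL2_ROCE,
					 frag, frag_len, cookie,
					 1 /* notify_fw */);
}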
1802
1803int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1804 u8 connection_handle,
1805 dma_addr_t addr, u16 nbytes)
1806{
1807 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1808 struct qed_ll2_info *p_ll2_conn = NULL;
1809 u16 cur_send_frag_num = 0;
1810 struct core_tx_bd *p_bd;
1811 unsigned long flags;
1812
1813 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1814 if (!p_ll2_conn)
1815 return -EINVAL;
1816
1817 if (!p_ll2_conn->tx_queue.cur_send_packet)
1818 return -EINVAL;
1819
1820 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1821 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1822
1823 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1824 return -EINVAL;
1825
1826 /* Fill the BD information, and possibly notify FW */
1827 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1828 DMA_REGPAIR_LE(p_bd->addr, addr);
1829 p_bd->nbytes = cpu_to_le16(nbytes);
1830 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1831 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1832
1833 p_ll2_conn->tx_queue.cur_send_frag_num++;
1834
1835 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1836 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1837 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1838
1839 return 0;
1840}
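
/*
 * Scatter-gather sketch: prepare the packet with num_of_bds covering all
 * fragments, then attach the remaining ones, mirroring what
 * qed_ll2_start_xmit() below does (the frags[] array is hypothetical):
 *
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 1 + nfrags, ...);
 *	for (i = 0; i < nfrags && !rc; i++)
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frags[i].addr,
 *						       frags[i].len);
 */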
1841
1842int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1843{
1844 struct qed_ll2_info *p_ll2_conn = NULL;
1845 int rc = -EINVAL;
Rahul Verma15582962017-04-06 15:58:29 +03001846 struct qed_ptt *p_ptt;
1847
1848 p_ptt = qed_ptt_acquire(p_hwfn);
1849 if (!p_ptt)
1850 return -EAGAIN;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001851
1852 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
Rahul Verma15582962017-04-06 15:58:29 +03001853 if (!p_ll2_conn) {
1854 rc = -EINVAL;
1855 goto out;
1856 }
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001857
1858 /* Stop Tx & Rx of connection, if needed */
1859 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1860 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1861 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001862 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001863 qed_ll2_txq_flush(p_hwfn, connection_handle);
1864 }
1865
1866 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1867 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1868 if (rc)
Rahul Verma15582962017-04-06 15:58:29 +03001869 goto out;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001870 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1871 }
1872
Arnd Bergmann0629a332017-01-18 15:52:52 +01001873 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001874 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1875
Arun Easi1e128c82017-02-15 06:28:22 -08001876 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
Rahul Verma15582962017-04-06 15:58:29 +03001877 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001878 0x8906, 0,
1879 QED_LLH_FILTER_ETHERTYPE);
Rahul Verma15582962017-04-06 15:58:29 +03001880 qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
Arun Easi1e128c82017-02-15 06:28:22 -08001881 0x8914, 0,
1882 QED_LLH_FILTER_ETHERTYPE);
1883 }
1884
Rahul Verma15582962017-04-06 15:58:29 +03001885out:
1886 qed_ptt_release(p_hwfn, p_ptt);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001887 return rc;
1888}
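
/*
 * Teardown order sketch (as used by qed_ll2_stop() below): terminate the
 * connection first so Tx/Rx are stopped and the queues flushed, then
 * release its resources:
 *
 *	rc = qed_ll2_terminate_connection(p_hwfn, handle);
 *	if (rc)
 *		DP_INFO(p_hwfn, "Failed to terminate LL2 connection\n");
 *	qed_ll2_release_connection(p_hwfn, handle);
 */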
1889
1890void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1891{
1892 struct qed_ll2_info *p_ll2_conn = NULL;
1893
1894 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1895 if (!p_ll2_conn)
1896 return;
1897
1898 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1899 p_ll2_conn->rx_queue.b_cb_registred = false;
1900 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1901 }
1902
1903 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1904 p_ll2_conn->tx_queue.b_cb_registred = false;
1905 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1906 }
1907
1908 kfree(p_ll2_conn->tx_queue.descq_array);
1909 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1910
1911 kfree(p_ll2_conn->rx_queue.descq_array);
1912 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1913 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1914
1915 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1916
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001917 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1918
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001919 mutex_lock(&p_ll2_conn->mutex);
1920 p_ll2_conn->b_active = false;
1921 mutex_unlock(&p_ll2_conn->mutex);
1922}
1923
1924struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1925{
1926 struct qed_ll2_info *p_ll2_connections;
1927 u8 i;
1928
 1929	/* Allocate the LL2 connections array */
1930 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1931 sizeof(struct qed_ll2_info), GFP_KERNEL);
1932 if (!p_ll2_connections) {
 1933		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2_info'\n");
1934 return NULL;
1935 }
1936
1937 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1938 p_ll2_connections[i].my_id = i;
1939
1940 return p_ll2_connections;
1941}
1942
1943void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1944 struct qed_ll2_info *p_ll2_connections)
1945{
1946 int i;
1947
1948 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1949 mutex_init(&p_ll2_connections[i].mutex);
1950}
1951
1952void qed_ll2_free(struct qed_hwfn *p_hwfn,
1953 struct qed_ll2_info *p_ll2_connections)
1954{
1955 kfree(p_ll2_connections);
1956}
1957
1958static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1959 struct qed_ptt *p_ptt,
1960 struct qed_ll2_info *p_ll2_conn,
1961 struct qed_ll2_stats *p_stats)
1962{
1963 struct core_ll2_tstorm_per_queue_stat tstats;
1964 u8 qid = p_ll2_conn->queue_id;
1965 u32 tstats_addr;
1966
1967 memset(&tstats, 0, sizeof(tstats));
1968 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1969 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1970 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1971
1972 p_stats->packet_too_big_discard =
1973 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1974 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1975}
1976
1977static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1978 struct qed_ptt *p_ptt,
1979 struct qed_ll2_info *p_ll2_conn,
1980 struct qed_ll2_stats *p_stats)
1981{
1982 struct core_ll2_ustorm_per_queue_stat ustats;
1983 u8 qid = p_ll2_conn->queue_id;
1984 u32 ustats_addr;
1985
1986 memset(&ustats, 0, sizeof(ustats));
1987 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1988 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1989 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1990
1991 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1992 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1993 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1994 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1995 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1996 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1997}
1998
1999static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
2000 struct qed_ptt *p_ptt,
2001 struct qed_ll2_info *p_ll2_conn,
2002 struct qed_ll2_stats *p_stats)
2003{
2004 struct core_ll2_pstorm_per_queue_stat pstats;
2005 u8 stats_id = p_ll2_conn->tx_stats_id;
2006 u32 pstats_addr;
2007
2008 memset(&pstats, 0, sizeof(pstats));
2009 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
2010 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
2011 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
2012
2013 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
2014 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
2015 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
2016 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
2017 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
2018 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
2019}
2020
2021int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
2022 u8 connection_handle, struct qed_ll2_stats *p_stats)
2023{
2024 struct qed_ll2_info *p_ll2_conn = NULL;
2025 struct qed_ptt *p_ptt;
2026
2027 memset(p_stats, 0, sizeof(*p_stats));
2028
2029 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
2030 !p_hwfn->p_ll2_info)
2031 return -EINVAL;
2032
2033 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2034
2035 p_ptt = qed_ptt_acquire(p_hwfn);
2036 if (!p_ptt) {
2037 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2038 return -EINVAL;
2039 }
2040
2041 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2042 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2043 if (p_ll2_conn->tx_stats_en)
2044 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2045
2046 qed_ptt_release(p_hwfn, p_ptt);
2047 return 0;
2048}
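
/*
 * Stats usage sketch (illustrative only, not called by the driver):
 * snapshot the counters of a connection and log two of them; the casts
 * keep the printk format portable across architectures.
 */
static int __maybe_unused
qed_ll2_example_log_stats(struct qed_hwfn *p_hwfn, u8 handle)
{
	struct qed_ll2_stats stats;
	int rc;

	rc = qed_ll2_get_stats(p_hwfn, handle, &stats);
	if (rc)
		return rc;

	DP_INFO(p_hwfn, "LL2 rx: %llu ucast pkts, %llu no-buffer drops\n",
		(unsigned long long)stats.rcv_ucast_pkts,
		(unsigned long long)stats.no_buff_discard);
	return 0;
}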
2049
2050static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2051 const struct qed_ll2_cb_ops *ops,
2052 void *cookie)
2053{
2054 cdev->ll2->cbs = ops;
2055 cdev->ll2->cb_cookie = cookie;
2056}
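
/*
 * Registration sketch: rx_cb/tx_cb are the callback members declared for
 * struct qed_ll2_cb_ops in qed_ll2_if.h; my_rx_complete, my_tx_complete
 * and my_dev are hypothetical consumer-side names:
 *
 *	static const struct qed_ll2_cb_ops my_ll2_cb_ops = {
 *		.rx_cb = my_rx_complete,
 *		.tx_cb = my_tx_complete,
 *	};
 *
 *	qed_ll2_register_cb_ops(cdev, &my_ll2_cb_ops, my_dev);
 */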
2057
2058static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2059{
Arnd Bergmann0629a332017-01-18 15:52:52 +01002060 struct qed_ll2_conn ll2_info;
Wei Yongjun88a24282016-10-10 14:08:28 +00002061 struct qed_ll2_buffer *buffer, *tmp_buffer;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002062 enum qed_ll2_conn_type conn_type;
2063 struct qed_ptt *p_ptt;
2064 int rc, i;
Yuval Mintzfc831822016-12-01 00:21:06 -08002065 u8 gsi_enable = 1;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002066
2067 /* Initialize LL2 locks & lists */
2068 INIT_LIST_HEAD(&cdev->ll2->list);
2069 spin_lock_init(&cdev->ll2->lock);
2070 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2071 L1_CACHE_BYTES + params->mtu;
2072 cdev->ll2->frags_mapped = params->frags_mapped;
2073
 2074	/* Allocate memory for LL2 */
2075 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2076 cdev->ll2->rx_size);
2077 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2078 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2079 if (!buffer) {
2080 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2081 goto fail;
2082 }
2083
2084 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2085 &buffer->phys_addr);
2086 if (rc) {
2087 kfree(buffer);
2088 goto fail;
2089 }
2090
2091 list_add_tail(&buffer->list, &cdev->ll2->list);
2092 }
2093
2094 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002095 case QED_PCI_FCOE:
2096 conn_type = QED_LL2_TYPE_FCOE;
2097 gsi_enable = 0;
2098 break;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002099 case QED_PCI_ISCSI:
2100 conn_type = QED_LL2_TYPE_ISCSI;
Yuval Mintzfc831822016-12-01 00:21:06 -08002101 gsi_enable = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002102 break;
2103 case QED_PCI_ETH_ROCE:
2104 conn_type = QED_LL2_TYPE_ROCE;
2105 break;
2106 default:
2107 conn_type = QED_LL2_TYPE_TEST;
2108 }
2109
2110 /* Prepare the temporary ll2 information */
2111 memset(&ll2_info, 0, sizeof(ll2_info));
Arnd Bergmann0629a332017-01-18 15:52:52 +01002112
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002113 ll2_info.conn_type = conn_type;
2114 ll2_info.mtu = params->mtu;
2115 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2116 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
2117 ll2_info.tx_tc = 0;
2118 ll2_info.tx_dest = CORE_TX_DEST_NW;
Yuval Mintzfc831822016-12-01 00:21:06 -08002119 ll2_info.gsi_enable = gsi_enable;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002120
2121 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
2122 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
2123 &cdev->ll2->handle);
2124 if (rc) {
2125 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2126 goto fail;
2127 }
2128
2129 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2130 cdev->ll2->handle);
2131 if (rc) {
2132 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2133 goto release_fail;
2134 }
2135
2136 /* Post all Rx buffers to FW */
2137 spin_lock_bh(&cdev->ll2->lock);
Wei Yongjun88a24282016-10-10 14:08:28 +00002138 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002139 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2140 cdev->ll2->handle,
2141 buffer->phys_addr, 0, buffer, 1);
2142 if (rc) {
2143 DP_INFO(cdev,
 2144				"Failed to post an Rx buffer; deleting it\n");
2145 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2146 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2147 kfree(buffer->data);
2148 list_del(&buffer->list);
2149 kfree(buffer);
2150 } else {
2151 cdev->ll2->rx_cnt++;
2152 }
2153 }
2154 spin_unlock_bh(&cdev->ll2->lock);
2155
2156 if (!cdev->ll2->rx_cnt) {
2157 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2158 goto release_terminate;
2159 }
2160
2161 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2162 DP_INFO(cdev, "Invalid Ethernet address\n");
2163 goto release_terminate;
2164 }
2165
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002166 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2167 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2168 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2169 rc = qed_ll2_start_ooo(cdev, params);
2170 if (rc) {
2171 DP_INFO(cdev,
2172 "Failed to initialize the OOO LL2 queue\n");
2173 goto release_terminate;
2174 }
2175 }
2176
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002177 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2178 if (!p_ptt) {
2179 DP_INFO(cdev, "Failed to acquire PTT\n");
2180 goto release_terminate;
2181 }
2182
2183 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2184 params->ll2_mac_address);
2185 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2186 if (rc) {
2187 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2188 goto release_terminate_all;
2189 }
2190
2191 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002192 return 0;
2193
2194release_terminate_all:
2195
2196release_terminate:
2197 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2198release_fail:
2199 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2200fail:
2201 qed_ll2_kill_buffers(cdev);
2202 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2203 return -EINVAL;
2204}
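
/*
 * Start-up sketch for a consumer (hypothetical caller; the field names
 * match those qed_ll2_start() consumes above):
 *
 *	struct qed_ll2_params params = {
 *		.mtu = 1500,
 *		.drop_ttl0_packets = true,
 *		.rx_vlan_stripping = true,
 *		.frags_mapped = false,
 *	};
 *
 *	ether_addr_copy(params.ll2_mac_address, dev_mac);
 *	rc = qed_ll2_ops_pass.start(cdev, &params);
 */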
2205
2206static int qed_ll2_stop(struct qed_dev *cdev)
2207{
2208 struct qed_ptt *p_ptt;
2209 int rc;
2210
2211 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2212 return 0;
2213
2214 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2215 if (!p_ptt) {
2216 DP_INFO(cdev, "Failed to acquire PTT\n");
2217 goto fail;
2218 }
2219
2220 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2221 cdev->ll2_mac_address);
2222 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2223 eth_zero_addr(cdev->ll2_mac_address);
2224
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002225 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2226 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2227 qed_ll2_stop_ooo(cdev);
2228
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002229 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2230 cdev->ll2->handle);
2231 if (rc)
2232 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2233
2234 qed_ll2_kill_buffers(cdev);
2235
2236 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2237 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2238
2239 return rc;
2240fail:
2241 return -EINVAL;
2242}
2243
2244static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2245{
2246 const skb_frag_t *frag;
2247 int rc = -EINVAL, i;
2248 dma_addr_t mapping;
2249 u16 vlan = 0;
2250 u8 flags = 0;
2251
2252 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 2253		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2254 return -EINVAL;
2255 }
2256
2257 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2258 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2259 1 + skb_shinfo(skb)->nr_frags);
2260 return -EINVAL;
2261 }
2262
2263 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2264 skb->len, DMA_TO_DEVICE);
2265 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2266 DP_NOTICE(cdev, "SKB mapping failed\n");
2267 return -EINVAL;
2268 }
2269
2270 /* Request HW to calculate IP csum */
2271 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2272 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002273 flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002274
2275 if (skb_vlan_tag_present(skb)) {
2276 vlan = skb_vlan_tag_get(skb);
Mintz, Yuvalbe086e72017-03-11 18:39:18 +02002277 flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002278 }
2279
2280 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
2281 cdev->ll2->handle,
2282 1 + skb_shinfo(skb)->nr_frags,
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002283 vlan, flags, 0, QED_LL2_TX_DEST_NW,
2284 0 /* RoCE FLAVOR */,
Ram Amraniabd49672016-10-01 22:00:01 +03002285 mapping, skb->len, skb, 1);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002286 if (rc)
2287 goto err;
2288
2289 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2290 frag = &skb_shinfo(skb)->frags[i];
2291 if (!cdev->ll2->frags_mapped) {
2292 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2293 skb_frag_size(frag),
2294 DMA_TO_DEVICE);
2295
2296 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2297 mapping))) {
2298 DP_NOTICE(cdev,
2299 "Unable to map frag - dropping packet\n");
Pan Bian0ff18d22016-12-04 13:53:53 +08002300 rc = -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002301 goto err;
2302 }
2303 } else {
2304 mapping = page_to_phys(skb_frag_page(frag)) |
2305 frag->page_offset;
2306 }
2307
2308 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2309 cdev->ll2->handle,
2310 mapping,
2311 skb_frag_size(frag));
2312
 2313		/* If this fails there is little we can do: part of the packet
 2314		 * has been posted; memory can't be freed until completion.
 2315		 */
2316 if (rc)
2317 goto err2;
2318 }
2319
2320 return 0;
2321
2322err:
2323 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2324
2325err2:
2326 return rc;
2327}
2328
2329static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2330{
2331 if (!cdev->ll2)
2332 return -EINVAL;
2333
2334 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2335 cdev->ll2->handle, stats);
2336}
2337
2338const struct qed_ll2_ops qed_ll2_ops_pass = {
2339 .start = &qed_ll2_start,
2340 .stop = &qed_ll2_stop,
2341 .start_xmit = &qed_ll2_start_xmit,
2342 .register_cb_ops = &qed_ll2_register_cb_ops,
2343 .get_stats = &qed_ll2_stats,
2344};
2345
2346int qed_ll2_alloc_if(struct qed_dev *cdev)
2347{
2348 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2349 return cdev->ll2 ? 0 : -ENOMEM;
2350}
2351
2352void qed_ll2_dealloc_if(struct qed_dev *cdev)
2353{
2354 kfree(cdev->ll2);
2355 cdev->ll2 = NULL;
2356}