/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

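/* Tx-done handler for the cdev-level LL2 path: release the DMA mapping
 * of the first fragment, let the registered tx_cb (if any) see the skb
 * and then free it.
 */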
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

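/* Allocate and DMA-map an Rx buffer for the cdev-level LL2 path; the
 * device writes at an offset of NET_SKB_PAD so the buffer can later be
 * wrapped by build_skb() with headroom intact.
 */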
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

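/* Rx completion for the cdev-level LL2 path: try to allocate a
 * replacement buffer, hand the filled one upward as an skb via rx_cb,
 * and repost a buffer to the FW; on failure the old buffer is reused.
 */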
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

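/* Resolve a connection handle to its qed_ll2_info element; optionally
 * take the per-connection mutex and/or require the connection to be
 * active. Returns NULL when the handle is out of range or unsuitable.
 */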
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

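/* Empty the Tx active list of a no-longer-active connection, completing
 * each pending packet back to its owner; OOO buffers are simply
 * returned to the free pool.
 */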
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->conn.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->
							       my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}

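/* Tx interrupt handler: complete as many packets as the FW consumer
 * indicates, releasing their BDs from the chain. The queue lock is
 * dropped while the per-packet completion callbacks run.
 */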
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->conn.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

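/* Complete a single GSI (RoCE) Rx CQE toward the RDMA layer; the Rx
 * lock is released around the upper-layer callback.
 */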
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long *p_lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

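/* Rx interrupt handler: consume RCQ entries up to the FW consumer and
 * dispatch each CQE to the slowpath, GSI or regular completion path.
 */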
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, &flags,
							b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}

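/* Loopback Rx handler for the iSCSI OOO connection: each CQE carries a
 * TCP reassembly opcode in its opaque data, which is translated here
 * into isle add/join/delete operations on the OOO state machine.
 */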
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n"
				  );
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

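/* Re-transmit every buffer the OOO state machine has marked ready,
 * sending it on the connection's Tx queue with the Rx parse flags
 * converted back into Tx BD flags.
 */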
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->conn.tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

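/* For an iSCSI OOO connection, pre-allocate the DMA-coherent Rx buffers
 * used for reassembly: mtu plus 26 bytes of header room, rounded up to
 * the cache-line size.
 */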
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_conn ll2_info = { 0 };
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = OOO_LB_TC;
	ll2_info.tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
				    { return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */

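/* Send the CORE_RAMROD_RX_QUEUE_START ramrod, describing the Rx BD
 * chain and the RCQ PBL to the firmware along with the error-handling
 * policy in action_on_error.
 */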
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->conn.tx_tc) {
	case LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case OOO_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}

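/* Reserve a free LL2 connection slot, allocate its Rx/Tx chains and
 * descriptor arrays (plus OOO buffers when relevant), and register the
 * matching Rx/Tx completion callbacks with the interrupt layer.
 */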
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn = *p_params;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

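/* Zero the Rx producer in TSTORM RAM and start the Rx queue, with the
 * connection's error-handling choices encoded into action_on_error.
 */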
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->conn.ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

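/* Move queued-but-unposted Rx descriptors (and optionally one freshly
 * supplied packet) onto the active list, then publish the new BD/CQE
 * producer values to the FW. Called with the Rx lock held.
 */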
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

1599static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1600 struct qed_ll2_tx_queue *p_tx,
1601 struct qed_ll2_tx_packet *p_curp,
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001602 struct qed_ll2_tx_pkt_info *pkt,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001603 u8 notify_fw)
1604{
1605 list_del(&p_curp->list_entry);
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001606 p_curp->cookie = pkt->cookie;
1607 p_curp->bd_used = pkt->num_of_bds;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001608 p_curp->notify_fw = notify_fw;
1609 p_tx->cur_send_packet = p_curp;
1610 p_tx->cur_send_frag_num = 0;
1611
Mintz, Yuval7c7973b2017-06-09 17:13:18 +03001612 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
1613 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001614 p_tx->cur_send_frag_num++;
1615}
1616
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
						       : CORE_TX_DEST_LB;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->conn.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* Ring the Tx doorbell if the current packet has all of its BDs and FW
 * notification was requested. Must be called with the Txq spinlock held.
 */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of the packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}

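/* Queue a Tx packet on an established LL2 connection. The first fragment
 * is described by @pkt; if @pkt->num_of_bds is larger than one, the caller
 * must supply the remaining fragments through
 * qed_ll2_set_fragment_of_tx_packet() before the doorbell is rung.
 */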
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

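/* Attach the next fragment to the packet-in-progress. A minimal usage
 * sketch for a multi-BD packet (hypothetical handle and DMA mappings,
 * mirroring the flow in qed_ll2_start_xmit() below):
 *
 *	pkt.num_of_bds = 3;
 *	pkt.first_frag = frag0_dma;
 *	pkt.first_frag_len = frag0_len;
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
 *	if (!rc) {
 *		qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						  frag1_dma, frag1_len);
 *		qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						  frag2_dma, frag2_len);
 *	}
 *
 * The FW is only doorbelled once the last fragment has been provided.
 */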
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

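/* Stop Tx/Rx on a connection and flush any outstanding descriptors. For
 * iSCSI OOO connections all OOO isles are released; for FCoE connections
 * the FCoE/FIP ethertype LLH filters (0x8906/0x8914) are removed as well.
 */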
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8906, 0,
					       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8914, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

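/* Return a terminated connection's resources: unregister its interrupt
 * callbacks, free the descriptor arrays and chains, and release the CID.
 */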
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

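/* Lifetime of the per-hwfn LL2 connection array: qed_ll2_alloc()
 * allocates it, qed_ll2_setup() initializes the per-connection mutexes,
 * and qed_ll2_free() releases it.
 */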
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;
	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

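/* The three helpers below read a connection's per-queue statistics from
 * the SDM RAM areas: TSTORM for Rx discards, USTORM for Rx byte/packet
 * counters and PSTORM for Tx byte/packet counters.
 */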
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
			HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

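/* Collect the aggregated statistics for a single connection. Pstorm Tx
 * stats are only read when Tx statistics were enabled for the connection.
 */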
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

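/* Bring up the device-level LL2 interface: allocate and post Rx buffers,
 * acquire and establish a connection whose type matches the PF
 * personality, optionally start the iSCSI OOO queue, and install an LLH
 * MAC filter for the requested address.
 */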
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_conn ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;
	u8 gsi_enable = 1;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		gsi_enable = 0;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		gsi_enable = 0;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));

	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = gsi_enable;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

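/* Tear down the device-level LL2 interface in reverse order of
 * qed_ll2_start(): remove the MAC filter, stop the OOO queue if it was
 * started, terminate and release the connection, and free the buffers.
 */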
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

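/* Map an skb and queue it for transmission over the LL2 connection: the
 * linear part becomes the first BD and each page fragment is added as an
 * additional BD. Skbs that require checksum offload (ip_summed !=
 * CHECKSUM_NONE) are rejected, since only IP checksum calculation can be
 * requested here.
 */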
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;

	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		if (!cdev->ll2->frags_mapped) {
			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
						       mapping))) {
				DP_NOTICE(cdev,
					  "Unable to map frag - dropping packet\n");
				rc = -ENOMEM;
				goto err;
			}
		} else {
			mapping = page_to_phys(skb_frag_page(frag)) |
			    frag->page_offset;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there's not much we can do: a partial packet
		 * has already been posted and we can't free its memory until
		 * its completion arrives.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}