1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2017 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/types.h>
34#include <asm/byteorder.h>
35#include <linux/dma-mapping.h>
36#include <linux/if_vlan.h>
37#include <linux/kernel.h>
38#include <linux/pci.h>
39#include <linux/slab.h>
40#include <linux/stddef.h>
41#include <linux/version.h>
42#include <linux/workqueue.h>
43#include <net/ipv6.h>
44#include <linux/bitops.h>
45#include <linux/delay.h>
46#include <linux/errno.h>
47#include <linux/etherdevice.h>
48#include <linux/io.h>
49#include <linux/list.h>
50#include <linux/mutex.h>
51#include <linux/spinlock.h>
52#include <linux/string.h>
53#include <linux/qed/qed_ll2_if.h>
54#include "qed.h"
55#include "qed_cxt.h"
56#include "qed_dev_api.h"
57#include "qed_hsi.h"
58#include "qed_hw.h"
59#include "qed_int.h"
60#include "qed_ll2.h"
61#include "qed_mcp.h"
62#include "qed_ooo.h"
63#include "qed_reg_addr.h"
64#include "qed_sp.h"
65#include "qed_roce.h"
66
67#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
68#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
69
70#define QED_LL2_TX_SIZE (256)
71#define QED_LL2_RX_SIZE (4096)
72
73struct qed_cb_ll2_info {
74 int rx_cnt;
75 u32 rx_size;
76 u8 handle;
77 bool frags_mapped;
78
79 /* Lock protecting LL2 buffer lists in sleepless context */
80 spinlock_t lock;
81 struct list_head list;
82
83 const struct qed_ll2_cb_ops *cbs;
84 void *cb_cookie;
85};
86
87struct qed_ll2_buffer {
88 struct list_head list;
89 void *data;
90 dma_addr_t phys_addr;
91};
92
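/* Tx-done callback for the qed module's own LL2 client: unmap the head
 * fragment, notify the registered tx_cb (if any) and free the skb. When
 * mapped frags were used, nr_frags is cleared first so the frag pages are
 * not released here.
 */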
93static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
94 u8 connection_handle,
95 void *cookie,
96 dma_addr_t first_frag_addr,
97 bool b_last_fragment,
98 bool b_last_packet)
99{
100 struct qed_dev *cdev = p_hwfn->cdev;
101 struct sk_buff *skb = cookie;
102
103 /* All we need to do is release the mapping */
104 dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
105 skb_headlen(skb), DMA_TO_DEVICE);
106
107 if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
108 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
109 b_last_fragment);
110
111 if (cdev->ll2->frags_mapped)
112 /* Case where mapped frags were received, need to
113 * free skb with nr_frags marked as 0
114 */
115 skb_shinfo(skb)->nr_frags = 0;
116
117 dev_kfree_skb_any(skb);
118}
119
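/* Allocate an Rx buffer of cdev->ll2->rx_size bytes (atomic context) and
 * DMA-map the area past the NET_SKB_PAD headroom for device writes.
 */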
120static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
121 u8 **data, dma_addr_t *phys_addr)
122{
123 *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
124 if (!(*data)) {
125 DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
126 return -ENOMEM;
127 }
128
129 *phys_addr = dma_map_single(&cdev->pdev->dev,
130 ((*data) + NET_SKB_PAD),
131 cdev->ll2->rx_size, DMA_FROM_DEVICE);
132 if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
133 DP_INFO(cdev, "Failed to map LL2 buffer data\n");
134 kfree((*data));
135 return -ENOMEM;
136 }
137
138 return 0;
139}
140
141static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
142 struct qed_ll2_buffer *buffer)
143{
144 spin_lock_bh(&cdev->ll2->lock);
145
146 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
147 cdev->ll2->rx_size, DMA_FROM_DEVICE);
148 kfree(buffer->data);
149 list_del(&buffer->list);
150
151 cdev->ll2->rx_cnt--;
152 if (!cdev->ll2->rx_cnt)
153 DP_INFO(cdev, "All LL2 entries were removed\n");
154
155 spin_unlock_bh(&cdev->ll2->lock);
156
157 return 0;
158}
159
160static void qed_ll2_kill_buffers(struct qed_dev *cdev)
161{
162 struct qed_ll2_buffer *buffer, *tmp_buffer;
163
164 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
165 qed_ll2_dealloc_buffer(cdev, buffer);
166}
167
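/* Rx completion for the qed module's own LL2 client: allocate a replacement
 * buffer (reusing the old one on failure or runt frames), wrap the completed
 * buffer in an skb via build_skb(), hand it to the registered rx_cb and
 * re-post an Rx buffer to the firmware.
 */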
168static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
169 u8 connection_handle,
170 struct qed_ll2_rx_packet *p_pkt,
171 struct core_rx_fast_path_cqe *p_cqe,
172 bool b_last_packet)
173{
174 u16 packet_length = le16_to_cpu(p_cqe->packet_length);
175 struct qed_ll2_buffer *buffer = p_pkt->cookie;
176 struct qed_dev *cdev = p_hwfn->cdev;
177 u16 vlan = le16_to_cpu(p_cqe->vlan);
178 u32 opaque_data_0, opaque_data_1;
179 u8 pad = p_cqe->placement_offset;
180 dma_addr_t new_phys_addr;
181 struct sk_buff *skb;
182 bool reuse = false;
183 int rc = -EINVAL;
184 u8 *new_data;
185
186 opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
187 opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
188
189 DP_VERBOSE(p_hwfn,
190 (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
191 "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
192 (u64)p_pkt->rx_buf_addr, pad, packet_length,
193 le16_to_cpu(p_cqe->parse_flags.flags), vlan,
194 opaque_data_0, opaque_data_1);
195
196 if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
197 print_hex_dump(KERN_INFO, "",
198 DUMP_PREFIX_OFFSET, 16, 1,
199 buffer->data, packet_length, false);
200 }
201
202 /* Determine if data is valid */
203 if (packet_length < ETH_HLEN)
204 reuse = true;
205
206 /* Allocate a replacement for buffer; Reuse upon failure */
207 if (!reuse)
208 rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
209 &new_phys_addr);
210
211 /* If need to reuse or there's no replacement buffer, repost this */
212 if (rc)
213 goto out_post;
214 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
215 cdev->ll2->rx_size, DMA_FROM_DEVICE);
216
217 skb = build_skb(buffer->data, 0);
218 if (!skb) {
219 rc = -ENOMEM;
220 goto out_post;
221 }
222
223 pad += NET_SKB_PAD;
224 skb_reserve(skb, pad);
225 skb_put(skb, packet_length);
226 skb_checksum_none_assert(skb);
227
228 /* Get partial ethernet information instead of eth_type_trans(),
229 * since we don't have an associated net_device.
230 */
231 skb_reset_mac_header(skb);
232 skb->protocol = eth_hdr(skb)->h_proto;
233
234 /* Pass SKB onward */
235 if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
236 if (vlan)
237 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
238 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
239 opaque_data_0, opaque_data_1);
240 }
241
242 /* Update Buffer information and update FW producer */
243 buffer->data = new_data;
244 buffer->phys_addr = new_phys_addr;
245
246out_post:
247 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
248 buffer->phys_addr, 0, buffer, 1);
249
250 if (rc)
251 qed_ll2_dealloc_buffer(cdev, buffer);
252}
253
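/* Translate a connection handle into its qed_ll2_info, optionally under the
 * per-connection mutex and optionally only if the connection is active.
 * Returns NULL for out-of-range handles or missing/inactive connections.
 */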
254static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
255 u8 connection_handle,
256 bool b_lock,
257 bool b_only_active)
258{
259 struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
260
261 if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
262 return NULL;
263
264 if (!p_hwfn->p_ll2_info)
265 return NULL;
266
267 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
268
269 if (b_only_active) {
270 if (b_lock)
271 mutex_lock(&p_ll2_conn->mutex);
272 if (p_ll2_conn->b_active)
273 p_ret = p_ll2_conn;
274 if (b_lock)
275 mutex_unlock(&p_ll2_conn->mutex);
276 } else {
277 p_ret = p_ll2_conn;
278 }
279
280 return p_ret;
281}
282
283static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
284 u8 connection_handle)
285{
286 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
287}
288
289static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
290 u8 connection_handle)
291{
292 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
293}
294
295static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
296 *p_hwfn,
297 u8 connection_handle)
298{
299 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
300}
301
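/* Drain the Tx active list of a connection that is being torn down: each
 * pending packet is returned to the OOO free-buffer pool (iSCSI OOO) or
 * completed through the GSI/regular Tx completion helpers.
 */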
302static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
303{
304 bool b_last_packet = false, b_last_frag = false;
305 struct qed_ll2_tx_packet *p_pkt = NULL;
306 struct qed_ll2_info *p_ll2_conn;
307 struct qed_ll2_tx_queue *p_tx;
308 dma_addr_t tx_frag;
309
310 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
311 if (!p_ll2_conn)
312 return;
313
314 p_tx = &p_ll2_conn->tx_queue;
315
316 while (!list_empty(&p_tx->active_descq)) {
317 p_pkt = list_first_entry(&p_tx->active_descq,
318 struct qed_ll2_tx_packet, list_entry);
319 if (!p_pkt)
320 break;
321
322 list_del(&p_pkt->list_entry);
323 b_last_packet = list_empty(&p_tx->active_descq);
324 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
325 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
326 struct qed_ooo_buffer *p_buffer;
327
328 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
329 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
330 p_buffer);
331 } else {
332 p_tx->cur_completing_packet = *p_pkt;
333 p_tx->cur_completing_bd_idx = 1;
334 b_last_frag =
335 p_tx->cur_completing_bd_idx == p_pkt->bd_used;
336 tx_frag = p_pkt->bds_set[0].tx_frag;
337 if (p_ll2_conn->conn.gsi_enable)
338 qed_ll2b_release_tx_gsi_packet(p_hwfn,
339 p_ll2_conn->
340 my_id,
341 p_pkt->cookie,
342 tx_frag,
343 b_last_frag,
344 b_last_packet);
345 else
346 qed_ll2b_complete_tx_packet(p_hwfn,
347 p_ll2_conn->my_id,
348 p_pkt->cookie,
349 tx_frag,
350 b_last_frag,
351 b_last_packet);
352 }
353 }
354}
355
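/* Tx completion handler (interrupt context): consume the BDs the firmware
 * has processed, complete whole packets via the GSI or regular path and
 * return their descriptors to the free list; the queue lock is released
 * around the completion callbacks.
 */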
356static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
357{
358 struct qed_ll2_info *p_ll2_conn = p_cookie;
359 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
360 u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
361 struct qed_ll2_tx_packet *p_pkt;
362 bool b_last_frag = false;
363 unsigned long flags;
364 dma_addr_t tx_frag;
365 int rc = -EINVAL;
366
367 spin_lock_irqsave(&p_tx->lock, flags);
368 if (p_tx->b_completing_packet) {
369 rc = -EBUSY;
370 goto out;
371 }
372
373 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
374 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
375 while (num_bds) {
376 if (list_empty(&p_tx->active_descq))
377 goto out;
378
379 p_pkt = list_first_entry(&p_tx->active_descq,
380 struct qed_ll2_tx_packet, list_entry);
381 if (!p_pkt)
382 goto out;
383
384 p_tx->b_completing_packet = true;
385 p_tx->cur_completing_packet = *p_pkt;
386 num_bds_in_packet = p_pkt->bd_used;
387 list_del(&p_pkt->list_entry);
388
389 if (num_bds < num_bds_in_packet) {
390 DP_NOTICE(p_hwfn,
391 "Rest of BDs does not cover whole packet\n");
392 goto out;
393 }
394
395 num_bds -= num_bds_in_packet;
396 p_tx->bds_idx += num_bds_in_packet;
397 while (num_bds_in_packet--)
398 qed_chain_consume(&p_tx->txq_chain);
399
400 p_tx->cur_completing_bd_idx = 1;
401 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
402 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
403
404 spin_unlock_irqrestore(&p_tx->lock, flags);
405 tx_frag = p_pkt->bds_set[0].tx_frag;
406 if (p_ll2_conn->conn.gsi_enable)
407 qed_ll2b_complete_tx_gsi_packet(p_hwfn,
408 p_ll2_conn->my_id,
409 p_pkt->cookie,
410 tx_frag,
411 b_last_frag, !num_bds);
412 else
413 qed_ll2b_complete_tx_packet(p_hwfn,
414 p_ll2_conn->my_id,
415 p_pkt->cookie,
416 tx_frag,
417 b_last_frag, !num_bds);
418 spin_lock_irqsave(&p_tx->lock, flags);
419 }
420
421 p_tx->b_completing_packet = false;
422 rc = 0;
423out:
424 spin_unlock_irqrestore(&p_tx->lock, flags);
425 return rc;
426}
427
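/* Complete a single GSI Rx CQE: pop the matching descriptor, extract
 * length/vlan/source-MAC from the CQE and forward everything to the GSI
 * completion handler with the Rx lock temporarily dropped.
 */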
428static int
429qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
430 struct qed_ll2_info *p_ll2_info,
431 union core_rx_cqe_union *p_cqe,
432 unsigned long lock_flags, bool b_last_cqe)
433{
434 struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
435 struct qed_ll2_rx_packet *p_pkt = NULL;
436 u16 packet_length, parse_flags, vlan;
437 u32 src_mac_addrhi;
438 u16 src_mac_addrlo;
439
440 if (!list_empty(&p_rx->active_descq))
441 p_pkt = list_first_entry(&p_rx->active_descq,
442 struct qed_ll2_rx_packet, list_entry);
443 if (!p_pkt) {
444 DP_NOTICE(p_hwfn,
445 "GSI Rx completion but active_descq is empty\n");
446 return -EIO;
447 }
448
449 list_del(&p_pkt->list_entry);
450 parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
451 packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
452 vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
453 src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
454 src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
455 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
456 DP_NOTICE(p_hwfn,
457 "Mismatch between active_descq and the LL2 Rx chain\n");
458 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
459
460 spin_unlock_irqrestore(&p_rx->lock, lock_flags);
461 qed_ll2b_complete_rx_gsi_packet(p_hwfn,
462 p_ll2_info->my_id,
463 p_pkt->cookie,
464 p_pkt->rx_buf_addr,
465 packet_length,
466 p_cqe->rx_cqe_gsi.data_length_error,
467 parse_flags,
468 vlan,
469 src_mac_addrhi,
470 src_mac_addrlo, b_last_cqe);
471 spin_lock_irqsave(&p_rx->lock, lock_flags);
472
473 return 0;
474}
475
476static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
477 struct qed_ll2_info *p_ll2_conn,
478 union core_rx_cqe_union *p_cqe,
479 unsigned long lock_flags,
480 bool b_last_cqe)
481{
482 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
483 struct qed_ll2_rx_packet *p_pkt = NULL;
484
485 if (!list_empty(&p_rx->active_descq))
486 p_pkt = list_first_entry(&p_rx->active_descq,
487 struct qed_ll2_rx_packet, list_entry);
488 if (!p_pkt) {
489 DP_NOTICE(p_hwfn,
490 "LL2 Rx completion but active_descq is empty\n");
491 return -EIO;
492 }
493 list_del(&p_pkt->list_entry);
494
495 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
496 DP_NOTICE(p_hwfn,
497 "Mismatch between active_descq and the LL2 Rx chain\n");
498 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
499
500 spin_unlock_irqrestore(&p_rx->lock, lock_flags);
501 qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
502 p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
503 spin_lock_irqsave(&p_rx->lock, lock_flags);
504
505 return 0;
506}
507
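/* Rx completion handler (interrupt context): consume RCQ entries up to the
 * firmware consumer and dispatch each CQE to the GSI or regular completion
 * routine; slow-path CQEs are not expected on this queue.
 */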
508static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
509{
510 struct qed_ll2_info *p_ll2_conn = cookie;
511 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
512 union core_rx_cqe_union *cqe = NULL;
513 u16 cq_new_idx = 0, cq_old_idx = 0;
514 unsigned long flags = 0;
515 int rc = 0;
516
517 spin_lock_irqsave(&p_rx->lock, flags);
518 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
519 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
520
521 while (cq_new_idx != cq_old_idx) {
522 bool b_last_cqe = (cq_new_idx == cq_old_idx);
523
524 cqe = qed_chain_consume(&p_rx->rcq_chain);
525 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
526
527 DP_VERBOSE(p_hwfn,
528 QED_MSG_LL2,
529 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
530 cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
531
532 switch (cqe->rx_cqe_sp.type) {
533 case CORE_RX_CQE_TYPE_SLOW_PATH:
534 DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
535 rc = -EINVAL;
536 break;
537 case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
538 rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
539 cqe, flags, b_last_cqe);
540 break;
541 case CORE_RX_CQE_TYPE_REGULAR:
542 rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
543 cqe, flags, b_last_cqe);
544 break;
545 default:
546 rc = -EIO;
547 }
548 }
549
550 spin_unlock_irqrestore(&p_rx->lock, flags);
551 return rc;
552}
553
554static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
555{
556 struct qed_ll2_info *p_ll2_conn = NULL;
557 struct qed_ll2_rx_packet *p_pkt = NULL;
558 struct qed_ll2_rx_queue *p_rx;
559
560 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
561 if (!p_ll2_conn)
562 return;
563
564 p_rx = &p_ll2_conn->rx_queue;
565
566 while (!list_empty(&p_rx->active_descq)) {
567 dma_addr_t rx_buf_addr;
568 void *cookie;
569 bool b_last;
570
571 p_pkt = list_first_entry(&p_rx->active_descq,
572 struct qed_ll2_rx_packet, list_entry);
573 if (!p_pkt)
574 break;
575
576 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
577
578 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
579 struct qed_ooo_buffer *p_buffer;
580
581 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
582 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
583 p_buffer);
584 } else {
585 rx_buf_addr = p_pkt->rx_buf_addr;
586 cookie = p_pkt->cookie;
587
588 b_last = list_empty(&p_rx->active_descq);
589 }
590 }
591}
592
593#if IS_ENABLED(CONFIG_QED_ISCSI)
594static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
595{
596 u8 bd_flags = 0;
597
598 if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
599 SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
600
601 return bd_flags;
602}
603
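/* Loopback Rx handler for the iSCSI OOO connection: apply the out-of-order
 * directives carried in each CQE's opaque data (drop isle, add to isle,
 * join isles, add to peninsula) to the per-connection OOO bookkeeping and
 * mark buffers that became in-order as ready for transmission.
 */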
604static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
605 struct qed_ll2_info *p_ll2_conn)
606{
607 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
608 u16 packet_length = 0, parse_flags = 0, vlan = 0;
609 struct qed_ll2_rx_packet *p_pkt = NULL;
610 u32 num_ooo_add_to_peninsula = 0, cid;
611 union core_rx_cqe_union *cqe = NULL;
612 u16 cq_new_idx = 0, cq_old_idx = 0;
613 struct qed_ooo_buffer *p_buffer;
614 struct ooo_opaque *iscsi_ooo;
615 u8 placement_offset = 0;
616 u8 cqe_type;
617
618 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
619 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
620 if (cq_new_idx == cq_old_idx)
621 return 0;
622
623 while (cq_new_idx != cq_old_idx) {
624 struct core_rx_fast_path_cqe *p_cqe_fp;
625
626 cqe = qed_chain_consume(&p_rx->rcq_chain);
627 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
628 cqe_type = cqe->rx_cqe_sp.type;
629
630 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
631 DP_NOTICE(p_hwfn,
632 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
633 cqe_type);
634 return -EINVAL;
635 }
636 p_cqe_fp = &cqe->rx_cqe_fp;
637
638 placement_offset = p_cqe_fp->placement_offset;
639 parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
640 packet_length = le16_to_cpu(p_cqe_fp->packet_length);
641 vlan = le16_to_cpu(p_cqe_fp->vlan);
642 iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
643 qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
644 iscsi_ooo);
645 cid = le32_to_cpu(iscsi_ooo->cid);
646
647 /* Process delete isle first */
648 if (iscsi_ooo->drop_size)
649 qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
650 iscsi_ooo->drop_isle,
651 iscsi_ooo->drop_size);
652
653 if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
654 continue;
655
656 /* Now process create/add/join isles */
657 if (list_empty(&p_rx->active_descq)) {
658 DP_NOTICE(p_hwfn,
659 "LL2 OOO RX chain has no submitted buffers\n"
660 );
661 return -EIO;
662 }
663
664 p_pkt = list_first_entry(&p_rx->active_descq,
665 struct qed_ll2_rx_packet, list_entry);
666
667 if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
668 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
669 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
670 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
671 (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
672 if (!p_pkt) {
673 DP_NOTICE(p_hwfn,
674 "LL2 OOO RX packet is not valid\n");
675 return -EIO;
676 }
677 list_del(&p_pkt->list_entry);
678 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
679 p_buffer->packet_length = packet_length;
680 p_buffer->parse_flags = parse_flags;
681 p_buffer->vlan = vlan;
682 p_buffer->placement_offset = placement_offset;
683 qed_chain_consume(&p_rx->rxq_chain);
684 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
685
686 switch (iscsi_ooo->ooo_opcode) {
687 case TCP_EVENT_ADD_NEW_ISLE:
688 qed_ooo_add_new_isle(p_hwfn,
689 p_hwfn->p_ooo_info,
690 cid,
691 iscsi_ooo->ooo_isle,
692 p_buffer);
693 break;
694 case TCP_EVENT_ADD_ISLE_RIGHT:
695 qed_ooo_add_new_buffer(p_hwfn,
696 p_hwfn->p_ooo_info,
697 cid,
698 iscsi_ooo->ooo_isle,
699 p_buffer,
700 QED_OOO_RIGHT_BUF);
701 break;
702 case TCP_EVENT_ADD_ISLE_LEFT:
703 qed_ooo_add_new_buffer(p_hwfn,
704 p_hwfn->p_ooo_info,
705 cid,
706 iscsi_ooo->ooo_isle,
707 p_buffer,
708 QED_OOO_LEFT_BUF);
709 break;
710 case TCP_EVENT_JOIN:
711 qed_ooo_add_new_buffer(p_hwfn,
712 p_hwfn->p_ooo_info,
713 cid,
714 iscsi_ooo->ooo_isle +
715 1,
716 p_buffer,
717 QED_OOO_LEFT_BUF);
718 qed_ooo_join_isles(p_hwfn,
719 p_hwfn->p_ooo_info,
720 cid, iscsi_ooo->ooo_isle);
721 break;
722 case TCP_EVENT_ADD_PEN:
723 num_ooo_add_to_peninsula++;
724 qed_ooo_put_ready_buffer(p_hwfn,
725 p_hwfn->p_ooo_info,
726 p_buffer, true);
727 break;
728 }
729 } else {
730 DP_NOTICE(p_hwfn,
731 "Unexpected event (%d) TX OOO completion\n",
732 iscsi_ooo->ooo_opcode);
733 }
734 }
735
736 return 0;
737}
738
739static void
740qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
741 struct qed_ll2_info *p_ll2_conn)
742{
743 struct qed_ooo_buffer *p_buffer;
744 int rc;
745 u16 l4_hdr_offset_w;
746 dma_addr_t first_frag;
747 u16 parse_flags;
748 u8 bd_flags;
749
750 /* Submit Tx buffers here */
751 while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
752 p_hwfn->p_ooo_info))) {
753 l4_hdr_offset_w = 0;
754 bd_flags = 0;
755
756 first_frag = p_buffer->rx_buffer_phys_addr +
757 p_buffer->placement_offset;
758 parse_flags = p_buffer->parse_flags;
759 bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
760 SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
761 SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
762
763 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
764 p_buffer->vlan, bd_flags,
765 l4_hdr_offset_w,
766 p_ll2_conn->conn.tx_dest, 0,
767 first_frag,
768 p_buffer->packet_length,
769 p_buffer, true);
770 if (rc) {
771 qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
772 p_buffer, false);
773 break;
774 }
775 }
776}
777
778static void
779qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
780 struct qed_ll2_info *p_ll2_conn)
781{
782 struct qed_ooo_buffer *p_buffer;
783 int rc;
784
785 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
786 p_hwfn->p_ooo_info))) {
787 rc = qed_ll2_post_rx_buffer(p_hwfn,
788 p_ll2_conn->my_id,
789 p_buffer->rx_buffer_phys_addr,
790 0, p_buffer, true);
791 if (rc) {
792 qed_ooo_put_free_buffer(p_hwfn,
793 p_hwfn->p_ooo_info, p_buffer);
794 break;
795 }
796 }
797}
798
799static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
800{
801 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
802 int rc;
803
804 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
805 if (rc)
806 return rc;
807
808 qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
809 qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
810
811 return 0;
812}
813
814static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
815{
816 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
817 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
818 struct qed_ll2_tx_packet *p_pkt = NULL;
819 struct qed_ooo_buffer *p_buffer;
820 bool b_dont_submit_rx = false;
821 u16 new_idx = 0, num_bds = 0;
822 int rc;
823
824 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
825 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
826
827 if (!num_bds)
828 return 0;
829
830 while (num_bds) {
831 if (list_empty(&p_tx->active_descq))
832 return -EINVAL;
833
834 p_pkt = list_first_entry(&p_tx->active_descq,
835 struct qed_ll2_tx_packet, list_entry);
836 if (!p_pkt)
837 return -EINVAL;
838
839 if (p_pkt->bd_used != 1) {
840 DP_NOTICE(p_hwfn,
841 "Unexpectedly many BDs(%d) in TX OOO completion\n",
842 p_pkt->bd_used);
843 return -EINVAL;
844 }
845
846 list_del(&p_pkt->list_entry);
847
848 num_bds--;
849 p_tx->bds_idx++;
850 qed_chain_consume(&p_tx->txq_chain);
851
852 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
853 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
854
855 if (b_dont_submit_rx) {
856 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
857 p_buffer);
858 continue;
859 }
860
861 rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
862 p_buffer->rx_buffer_phys_addr, 0,
863 p_buffer, true);
864 if (rc != 0) {
865 qed_ooo_put_free_buffer(p_hwfn,
866 p_hwfn->p_ooo_info, p_buffer);
867 b_dont_submit_rx = true;
868 }
869 }
870
871 qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
872
873 return 0;
874}
875
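/* For iSCSI OOO connections only: pre-allocate rx_num_ooo_buffers coherent
 * buffers sized for the MTU plus headers, rounded up to the cache-line
 * size, and park them in the OOO free-buffer pool.
 */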
876static int
877qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
878 struct qed_ll2_info *p_ll2_info,
879 u16 rx_num_ooo_buffers, u16 mtu)
880{
881 struct qed_ooo_buffer *p_buf = NULL;
882 void *p_virt;
883 u16 buf_idx;
884 int rc = 0;
885
886 if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
887 return rc;
888
889 if (!rx_num_ooo_buffers)
890 return -EINVAL;
891
892 for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
893 p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
894 if (!p_buf) {
895 rc = -ENOMEM;
896 goto out;
897 }
898
899 p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
900 p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
901 ETH_CACHE_LINE_SIZE - 1) &
902 ~(ETH_CACHE_LINE_SIZE - 1);
903 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
904 p_buf->rx_buffer_size,
905 &p_buf->rx_buffer_phys_addr,
906 GFP_KERNEL);
907 if (!p_virt) {
908 kfree(p_buf);
909 rc = -ENOMEM;
910 goto out;
911 }
912
913 p_buf->rx_buffer_virt_addr = p_virt;
914 qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
915 }
916
917 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
918 "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
919 rx_num_ooo_buffers, p_buf->rx_buffer_size);
920
921out:
922 return rc;
923}
924
925static void
926qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
927 struct qed_ll2_info *p_ll2_conn)
928{
929 if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
930 return;
931
932 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
933 qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
934}
935
936static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
937 struct qed_ll2_info *p_ll2_conn)
938{
939 struct qed_ooo_buffer *p_buffer;
940
941 if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
942 return;
943
944 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
945 while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
946 p_hwfn->p_ooo_info))) {
947 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
948 p_buffer->rx_buffer_size,
949 p_buffer->rx_buffer_virt_addr,
950 p_buffer->rx_buffer_phys_addr);
951 kfree(p_buffer);
952 }
953}
954
955static void qed_ll2_stop_ooo(struct qed_dev *cdev)
956{
957 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
958 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
959
960 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
961 *handle);
962
963 qed_ll2_terminate_connection(hwfn, *handle);
964 qed_ll2_release_connection(hwfn, *handle);
965 *handle = QED_LL2_UNUSED_HANDLE;
966}
967
968static int qed_ll2_start_ooo(struct qed_dev *cdev,
969 struct qed_ll2_params *params)
970{
971 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
972 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
973 struct qed_ll2_conn ll2_info = { 0 };
974 int rc;
975
976 ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
977 ll2_info.mtu = params->mtu;
978 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
979 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
980 ll2_info.tx_tc = OOO_LB_TC;
981 ll2_info.tx_dest = CORE_TX_DEST_LB;
982
983 rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
984 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
985 handle);
986 if (rc) {
987 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
988 goto out;
989 }
990
991 rc = qed_ll2_establish_connection(hwfn, *handle);
992 if (rc) {
993 DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
994 goto fail;
995 }
996
997 return 0;
998
999fail:
1000 qed_ll2_release_connection(hwfn, *handle);
1001out:
1002 *handle = QED_LL2_UNUSED_HANDLE;
1003 return rc;
1004}
1005#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
1006static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
1007 void *p_cookie) { return -EINVAL; }
1008static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
1009 void *p_cookie) { return -EINVAL; }
1010static inline int
1011qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
1012 struct qed_ll2_info *p_ll2_info,
1013 u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
1014static inline void
1015qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
1016 struct qed_ll2_info *p_ll2_conn) { return; }
1017static inline void
1018qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
1019 struct qed_ll2_info *p_ll2_conn) { return; }
1020static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
1021static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
1022 struct qed_ll2_params *params)
1023 { return -EINVAL; }
1024#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
1025
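/* Send the CORE_RAMROD_RX_QUEUE_START ramrod describing this LL2 Rx queue
 * (BD chain base, CQE PBL, MTU, vlan/ttl0 handling, error policy) to the
 * firmware over the slow path.
 */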
1026static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1027 struct qed_ll2_info *p_ll2_conn,
1028 u8 action_on_error)
1029{
1030 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1031 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
1032 struct core_rx_start_ramrod_data *p_ramrod = NULL;
1033 struct qed_spq_entry *p_ent = NULL;
1034 struct qed_sp_init_data init_data;
1035 u16 cqe_pbl_size;
1036 int rc = 0;
1037
1038 /* Get SPQ entry */
1039 memset(&init_data, 0, sizeof(init_data));
1040 init_data.cid = p_ll2_conn->cid;
1041 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1042 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1043
1044 rc = qed_sp_init_request(p_hwfn, &p_ent,
1045 CORE_RAMROD_RX_QUEUE_START,
1046 PROTOCOLID_CORE, &init_data);
1047 if (rc)
1048 return rc;
1049
1050 p_ramrod = &p_ent->ramrod.core_rx_queue_start;
1051
1052 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1053 p_ramrod->sb_index = p_rx->rx_sb_index;
1054 p_ramrod->complete_event_flg = 1;
1055
1056 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1057 DMA_REGPAIR_LE(p_ramrod->bd_base,
1058 p_rx->rxq_chain.p_phys_addr);
1059 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
1060 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
1061 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
1062 qed_chain_get_pbl_phys(&p_rx->rcq_chain));
1063
1064 p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
1065 p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
1066 p_ramrod->queue_id = p_ll2_conn->queue_id;
1067 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
1068 : 1;
1069
1070 if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
1071 p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
1072 p_ramrod->mf_si_bcast_accept_all = 1;
1073 p_ramrod->mf_si_mcast_accept_all = 1;
1074 } else {
1075 p_ramrod->mf_si_bcast_accept_all = 0;
1076 p_ramrod->mf_si_mcast_accept_all = 0;
1077 }
1078
1079 p_ramrod->action_on_error.error_type = action_on_error;
1080 p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1081 return qed_spq_post(p_hwfn, p_ent, NULL);
1082}
1083
1084static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1085 struct qed_ll2_info *p_ll2_conn)
1086{
1087 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1088 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1089 struct core_tx_start_ramrod_data *p_ramrod = NULL;
1090 struct qed_spq_entry *p_ent = NULL;
1091 struct qed_sp_init_data init_data;
1092 union qed_qm_pq_params pq_params;
1093 u16 pq_id = 0, pbl_size;
1094 int rc = -EINVAL;
1095
1096 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
1097 return 0;
1098
1099 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1100 p_ll2_conn->tx_stats_en = 0;
1101 else
1102 p_ll2_conn->tx_stats_en = 1;
1103
1104 /* Get SPQ entry */
1105 memset(&init_data, 0, sizeof(init_data));
1106 init_data.cid = p_ll2_conn->cid;
1107 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1108 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1109
1110 rc = qed_sp_init_request(p_hwfn, &p_ent,
1111 CORE_RAMROD_TX_QUEUE_START,
1112 PROTOCOLID_CORE, &init_data);
1113 if (rc)
1114 return rc;
1115
1116 p_ramrod = &p_ent->ramrod.core_tx_queue_start;
1117
1118 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1119 p_ramrod->sb_index = p_tx->tx_sb_index;
1120 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1121 p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
1122 p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
1123
1124 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
1125 qed_chain_get_pbl_phys(&p_tx->txq_chain));
1126 pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
1127 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1128
1129 memset(&pq_params, 0, sizeof(pq_params));
1130 pq_params.core.tc = p_ll2_conn->conn.tx_tc;
1131 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
1132 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1133
1134 switch (conn_type) {
1135 case QED_LL2_TYPE_FCOE:
1136 p_ramrod->conn_type = PROTOCOLID_FCOE;
1137 break;
1138 case QED_LL2_TYPE_ISCSI:
1139 case QED_LL2_TYPE_ISCSI_OOO:
1140 p_ramrod->conn_type = PROTOCOLID_ISCSI;
1141 break;
1142 case QED_LL2_TYPE_ROCE:
1143 p_ramrod->conn_type = PROTOCOLID_ROCE;
1144 break;
1145 default:
1146 p_ramrod->conn_type = PROTOCOLID_ETH;
1147 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
1148 }
1149
1150 p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1151 return qed_spq_post(p_hwfn, p_ent, NULL);
1152}
1153
1154static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
1155 struct qed_ll2_info *p_ll2_conn)
1156{
1157 struct core_rx_stop_ramrod_data *p_ramrod = NULL;
1158 struct qed_spq_entry *p_ent = NULL;
1159 struct qed_sp_init_data init_data;
1160 int rc = -EINVAL;
1161
1162 /* Get SPQ entry */
1163 memset(&init_data, 0, sizeof(init_data));
1164 init_data.cid = p_ll2_conn->cid;
1165 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1166 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1167
1168 rc = qed_sp_init_request(p_hwfn, &p_ent,
1169 CORE_RAMROD_RX_QUEUE_STOP,
1170 PROTOCOLID_CORE, &init_data);
1171 if (rc)
1172 return rc;
1173
1174 p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
1175
1176 p_ramrod->complete_event_flg = 1;
1177 p_ramrod->queue_id = p_ll2_conn->queue_id;
1178
1179 return qed_spq_post(p_hwfn, p_ent, NULL);
1180}
1181
1182static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
1183 struct qed_ll2_info *p_ll2_conn)
1184{
1185 struct qed_spq_entry *p_ent = NULL;
1186 struct qed_sp_init_data init_data;
1187 int rc = -EINVAL;
1188
1189 /* Get SPQ entry */
1190 memset(&init_data, 0, sizeof(init_data));
1191 init_data.cid = p_ll2_conn->cid;
1192 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1193 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1194
1195 rc = qed_sp_init_request(p_hwfn, &p_ent,
1196 CORE_RAMROD_TX_QUEUE_STOP,
1197 PROTOCOLID_CORE, &init_data);
1198 if (rc)
1199 return rc;
1200
1201 return qed_spq_post(p_hwfn, p_ent, NULL);
1202}
1203
1204static int
1205qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
1206 struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
1207{
1208 struct qed_ll2_rx_packet *p_descq;
1209 u32 capacity;
1210 int rc = 0;
1211
1212 if (!rx_num_desc)
1213 goto out;
1214
1215 rc = qed_chain_alloc(p_hwfn->cdev,
1216 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1217 QED_CHAIN_MODE_NEXT_PTR,
1218 QED_CHAIN_CNT_TYPE_U16,
1219 rx_num_desc,
1220 sizeof(struct core_rx_bd),
1221 &p_ll2_info->rx_queue.rxq_chain);
1222 if (rc) {
1223 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
1224 goto out;
1225 }
1226
1227 capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
1228 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
1229 GFP_KERNEL);
1230 if (!p_descq) {
1231 rc = -ENOMEM;
1232 DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
1233 goto out;
1234 }
1235 p_ll2_info->rx_queue.descq_array = p_descq;
1236
1237 rc = qed_chain_alloc(p_hwfn->cdev,
1238 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1239 QED_CHAIN_MODE_PBL,
1240 QED_CHAIN_CNT_TYPE_U16,
1241 rx_num_desc,
1242 sizeof(struct core_rx_fast_path_cqe),
1243 &p_ll2_info->rx_queue.rcq_chain);
1244 if (rc) {
1245 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
1246 goto out;
1247 }
1248
1249 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1250 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1251 p_ll2_info->conn.conn_type, rx_num_desc);
1252
1253out:
1254 return rc;
1255}
1256
1257static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
1258 struct qed_ll2_info *p_ll2_info,
1259 u16 tx_num_desc)
1260{
1261 struct qed_ll2_tx_packet *p_descq;
1262 u32 capacity;
1263 int rc = 0;
1264
1265 if (!tx_num_desc)
1266 goto out;
1267
1268 rc = qed_chain_alloc(p_hwfn->cdev,
1269 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1270 QED_CHAIN_MODE_PBL,
1271 QED_CHAIN_CNT_TYPE_U16,
1272 tx_num_desc,
1273 sizeof(struct core_tx_bd),
1274 &p_ll2_info->tx_queue.txq_chain);
1275 if (rc)
1276 goto out;
1277
1278 capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
1279 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
1280 GFP_KERNEL);
1281 if (!p_descq) {
1282 rc = -ENOMEM;
1283 goto out;
1284 }
1285 p_ll2_info->tx_queue.descq_array = p_descq;
1286
1287 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1288 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1289 p_ll2_info->conn.conn_type, tx_num_desc);
1290
1291out:
1292 if (rc)
1293 DP_NOTICE(p_hwfn,
1294 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
1295 tx_num_desc);
1296 return rc;
1297}
1298
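/* Reserve a free LL2 connection slot, allocate its Rx/Tx chains and
 * descriptor shadows (plus OOO buffers when needed) and register the
 * matching Rx/Tx completion callbacks; the returned handle identifies the
 * connection in all subsequent LL2 calls.
 */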
1299int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1300 struct qed_ll2_conn *p_params,
1301 u16 rx_num_desc,
1302 u16 tx_num_desc,
1303 u8 *p_connection_handle)
1304{
1305 qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
1306 struct qed_ll2_info *p_ll2_info = NULL;
1307 int rc;
1308 u8 i;
1309
1310 if (!p_connection_handle || !p_hwfn->p_ll2_info)
1311 return -EINVAL;
1312
1313 /* Find a free connection to be used */
1314 for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
1315 mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
1316 if (p_hwfn->p_ll2_info[i].b_active) {
1317 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1318 continue;
1319 }
1320
1321 p_hwfn->p_ll2_info[i].b_active = true;
1322 p_ll2_info = &p_hwfn->p_ll2_info[i];
1323 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
1324 break;
1325 }
1326 if (!p_ll2_info)
1327 return -EBUSY;
1328
1329 p_ll2_info->conn = *p_params;
1330
1331 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
1332 if (rc)
1333 goto q_allocate_fail;
1334
1335 rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
1336 if (rc)
1337 goto q_allocate_fail;
1338
1339 rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
1340 rx_num_desc * 2, p_params->mtu);
1341 if (rc)
1342 goto q_allocate_fail;
1343
1344 /* Register callbacks for the Rx/Tx queues */
1345 if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
1346 comp_rx_cb = qed_ll2_lb_rxq_completion;
1347 comp_tx_cb = qed_ll2_lb_txq_completion;
1348 } else {
1349 comp_rx_cb = qed_ll2_rxq_completion;
1350 comp_tx_cb = qed_ll2_txq_completion;
1351 }
1352
1353 if (rx_num_desc) {
1354 qed_int_register_cb(p_hwfn, comp_rx_cb,
1355 &p_hwfn->p_ll2_info[i],
1356 &p_ll2_info->rx_queue.rx_sb_index,
1357 &p_ll2_info->rx_queue.p_fw_cons);
1358 p_ll2_info->rx_queue.b_cb_registred = true;
1359 }
1360
1361 if (tx_num_desc) {
1362 qed_int_register_cb(p_hwfn,
1363 comp_tx_cb,
1364 &p_hwfn->p_ll2_info[i],
1365 &p_ll2_info->tx_queue.tx_sb_index,
1366 &p_ll2_info->tx_queue.p_fw_cons);
1367 p_ll2_info->tx_queue.b_cb_registred = true;
1368 }
1369
1370 *p_connection_handle = i;
1371 return rc;
1372
1373q_allocate_fail:
1374 qed_ll2_release_connection(p_hwfn, i);
1375 return -ENOMEM;
1376}
1377
1378static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1379 struct qed_ll2_info *p_ll2_conn)
1380{
1381 u8 action_on_error = 0;
1382
1383 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
1384 return 0;
1385
1386 DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
1387
1388 SET_FIELD(action_on_error,
1389 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
1390 p_ll2_conn->conn.ai_err_packet_too_big);
1391 SET_FIELD(action_on_error,
1392 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
1393
1394 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1395}
1396
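/* Bring up an acquired connection: reset chains and descriptor lists,
 * acquire a CID, derive the queue/stats ids and the producer/doorbell
 * addresses, then start the Rx and Tx queues via slow-path ramrods.
 * FCoE connections also get their ethertype filters installed here.
 */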
1397int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1398{
1399 struct qed_ll2_info *p_ll2_conn;
1400 struct qed_ll2_rx_queue *p_rx;
1401 struct qed_ll2_tx_queue *p_tx;
1402 int rc = -EINVAL;
1403 u32 i, capacity;
1404 u8 qid;
1405
1406 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1407 if (!p_ll2_conn)
1408 return -EINVAL;
1409 p_rx = &p_ll2_conn->rx_queue;
1410 p_tx = &p_ll2_conn->tx_queue;
1411
1412 qed_chain_reset(&p_rx->rxq_chain);
1413 qed_chain_reset(&p_rx->rcq_chain);
1414 INIT_LIST_HEAD(&p_rx->active_descq);
1415 INIT_LIST_HEAD(&p_rx->free_descq);
1416 INIT_LIST_HEAD(&p_rx->posting_descq);
1417 spin_lock_init(&p_rx->lock);
1418 capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
1419 for (i = 0; i < capacity; i++)
1420 list_add_tail(&p_rx->descq_array[i].list_entry,
1421 &p_rx->free_descq);
1422 *p_rx->p_fw_cons = 0;
1423
1424 qed_chain_reset(&p_tx->txq_chain);
1425 INIT_LIST_HEAD(&p_tx->active_descq);
1426 INIT_LIST_HEAD(&p_tx->free_descq);
1427 INIT_LIST_HEAD(&p_tx->sending_descq);
1428 spin_lock_init(&p_tx->lock);
1429 capacity = qed_chain_get_capacity(&p_tx->txq_chain);
1430 for (i = 0; i < capacity; i++)
1431 list_add_tail(&p_tx->descq_array[i].list_entry,
1432 &p_tx->free_descq);
1433 p_tx->cur_completing_bd_idx = 0;
1434 p_tx->bds_idx = 0;
1435 p_tx->b_completing_packet = false;
1436 p_tx->cur_send_packet = NULL;
1437 p_tx->cur_send_frag_num = 0;
1438 p_tx->cur_completing_frag_num = 0;
1439 *p_tx->p_fw_cons = 0;
1440
1441 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
1442
1443 qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
1444 p_ll2_conn->queue_id = qid;
1445 p_ll2_conn->tx_stats_id = qid;
1446 p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
1447 GTT_BAR0_MAP_REG_TSDM_RAM +
1448 TSTORM_LL2_RX_PRODS_OFFSET(qid);
1449 p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
1450 qed_db_addr(p_ll2_conn->cid,
1451 DQ_DEMS_LEGACY);
1452
1453 rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
1454 if (rc)
1455 return rc;
1456
1457 rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
1458 if (rc)
1459 return rc;
1460
1461 if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
1462 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
1463
1464 qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
1465
1466 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
1467 qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1468 0x8906, 0,
1469 QED_LLH_FILTER_ETHERTYPE);
1470 qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1471 0x8914, 0,
1472 QED_LLH_FILTER_ETHERTYPE);
1473 }
1474
1475 return rc;
1476}
1477
1478static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1479 struct qed_ll2_rx_queue *p_rx,
1480 struct qed_ll2_rx_packet *p_curp)
1481{
1482 struct qed_ll2_rx_packet *p_posting_packet = NULL;
1483 struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
1484 bool b_notify_fw = false;
1485 u16 bd_prod, cq_prod;
1486
1487 /* This handles the flushing of already posted buffers */
1488 while (!list_empty(&p_rx->posting_descq)) {
1489 p_posting_packet = list_first_entry(&p_rx->posting_descq,
1490 struct qed_ll2_rx_packet,
1491 list_entry);
1492 list_move_tail(&p_posting_packet->list_entry,
1493 &p_rx->active_descq);
1494 b_notify_fw = true;
1495 }
1496
1497 /* This handles the supplied packet [if there is one] */
1498 if (p_curp) {
1499 list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1500 b_notify_fw = true;
1501 }
1502
1503 if (!b_notify_fw)
1504 return;
1505
1506 bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1507 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1508 rx_prod.bd_prod = cpu_to_le16(bd_prod);
1509 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1510 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1511}
1512
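/* Post a single Rx buffer: take a free descriptor, fill an Rx BD and
 * reserve an RCQ entry, then either queue the buffer for a later producer
 * update or notify the firmware immediately.
 */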
1513int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1514 u8 connection_handle,
1515 dma_addr_t addr,
1516 u16 buf_len, void *cookie, u8 notify_fw)
1517{
1518 struct core_rx_bd_with_buff_len *p_curb = NULL;
1519 struct qed_ll2_rx_packet *p_curp = NULL;
1520 struct qed_ll2_info *p_ll2_conn;
1521 struct qed_ll2_rx_queue *p_rx;
1522 unsigned long flags;
1523 void *p_data;
1524 int rc = 0;
1525
1526 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1527 if (!p_ll2_conn)
1528 return -EINVAL;
1529 p_rx = &p_ll2_conn->rx_queue;
1530
1531 spin_lock_irqsave(&p_rx->lock, flags);
1532 if (!list_empty(&p_rx->free_descq))
1533 p_curp = list_first_entry(&p_rx->free_descq,
1534 struct qed_ll2_rx_packet, list_entry);
1535 if (p_curp) {
1536 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1537 qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1538 p_data = qed_chain_produce(&p_rx->rxq_chain);
1539 p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1540 qed_chain_produce(&p_rx->rcq_chain);
1541 }
1542 }
1543
1544 /* If we're lacking entries, let's try to flush buffers to FW */
1545 if (!p_curp || !p_curb) {
1546 rc = -EBUSY;
1547 p_curp = NULL;
1548 goto out_notify;
1549 }
1550
1551 /* We have an Rx packet we can fill */
1552 DMA_REGPAIR_LE(p_curb->addr, addr);
1553 p_curb->buff_length = cpu_to_le16(buf_len);
1554 p_curp->rx_buf_addr = addr;
1555 p_curp->cookie = cookie;
1556 p_curp->rxq_bd = p_curb;
1557 p_curp->buf_length = buf_len;
1558 list_del(&p_curp->list_entry);
1559
1560 /* Check if we only want to enqueue this packet without informing FW */
1561 if (!notify_fw) {
1562 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1563 goto out;
1564 }
1565
1566out_notify:
1567 qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1568out:
1569 spin_unlock_irqrestore(&p_rx->lock, flags);
1570 return rc;
1571}
1572
1573static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1574 struct qed_ll2_tx_queue *p_tx,
1575 struct qed_ll2_tx_packet *p_curp,
1576 u8 num_of_bds,
1577 dma_addr_t first_frag,
1578 u16 first_frag_len, void *p_cookie,
1579 u8 notify_fw)
1580{
1581 list_del(&p_curp->list_entry);
1582 p_curp->cookie = p_cookie;
1583 p_curp->bd_used = num_of_bds;
1584 p_curp->notify_fw = notify_fw;
1585 p_tx->cur_send_packet = p_curp;
1586 p_tx->cur_send_frag_num = 0;
1587
1588 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1589 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1590 p_tx->cur_send_frag_num++;
1591}
1592
1593static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1594 struct qed_ll2_info *p_ll2,
1595 struct qed_ll2_tx_packet *p_curp,
1596 u8 num_of_bds,
1597 enum core_tx_dest tx_dest,
1598 u16 vlan,
1599 u8 bd_flags,
1600 u16 l4_hdr_offset_w,
1601 enum core_roce_flavor_type type,
1602 dma_addr_t first_frag,
1603 u16 first_frag_len)
1604{
1605 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1606 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1607 struct core_tx_bd *start_bd = NULL;
1608 u16 frag_idx;
1609
1610 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1611 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1612 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1613 cpu_to_le16(l4_hdr_offset_w));
1614 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1615 start_bd->bd_flags.as_bitfield = bd_flags;
1616 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1617 CORE_TX_BD_FLAGS_START_BD_SHIFT;
1618 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1619 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
1620 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1621 start_bd->nbytes = cpu_to_le16(first_frag_len);
1622
1623 DP_VERBOSE(p_hwfn,
1624 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1625 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1626 p_ll2->queue_id,
1627 p_ll2->cid,
1628 p_ll2->conn.conn_type,
1629 prod_idx,
1630 first_frag_len,
1631 num_of_bds,
1632 le32_to_cpu(start_bd->addr.hi),
1633 le32_to_cpu(start_bd->addr.lo));
1634
1635 if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1636 return;
1637
1638 /* Need to provide the packet with additional BDs for frags */
1639 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1640 frag_idx < num_of_bds; frag_idx++) {
1641 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1642
1643 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1644 (*p_bd)->bd_flags.as_bitfield = 0;
1645 (*p_bd)->bitfield1 = 0;
1646 (*p_bd)->bitfield0 = 0;
1647 p_curp->bds_set[frag_idx].tx_frag = 0;
1648 p_curp->bds_set[frag_idx].frag_len = 0;
1649 }
1650}
1651
1652/* This should be called while the Txq spinlock is being held */
1653static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1654 struct qed_ll2_info *p_ll2_conn)
1655{
1656 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1657 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1658 struct qed_ll2_tx_packet *p_pkt = NULL;
1659 struct core_db_data db_msg = { 0, 0, 0 };
1660 u16 bd_prod;
1661
1662 /* If there are missing BDs, don't do anything now */
1663 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1664 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1665 return;
1666
1667 /* Push the current packet to the list and clean after it */
1668 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1669 &p_ll2_conn->tx_queue.sending_descq);
1670 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1671 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1672
1673 /* Notify FW of packet only if requested to */
1674 if (!b_notify)
1675 return;
1676
1677 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1678
1679 while (!list_empty(&p_tx->sending_descq)) {
1680 p_pkt = list_first_entry(&p_tx->sending_descq,
1681 struct qed_ll2_tx_packet, list_entry);
1682 if (!p_pkt)
1683 break;
1684
1685 list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
1686 }
1687
1688 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1689 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1690 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1691 DQ_XCM_CORE_TX_BD_PROD_CMD);
1692 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1693 db_msg.spq_prod = cpu_to_le16(bd_prod);
1694
1695 /* Make sure the BDs data is updated before ringing the doorbell */
1696 wmb();
1697
1698 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1699
1700 DP_VERBOSE(p_hwfn,
1701 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1702 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1703 p_ll2_conn->queue_id,
1704 p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
1705}
1706
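/* Start transmission of a packet made of num_of_bds fragments: reserve a Tx
 * descriptor, build the first BD (destination, vlan, flags, RoCE flavor)
 * and ring the doorbell once all fragments have been provided; remaining
 * fragments are added via qed_ll2_set_fragment_of_tx_packet().
 */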
1707int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1708 u8 connection_handle,
1709 u8 num_of_bds,
1710 u16 vlan,
1711 u8 bd_flags,
1712 u16 l4_hdr_offset_w,
1713 enum qed_ll2_tx_dest e_tx_dest,
1714 enum qed_ll2_roce_flavor_type qed_roce_flavor,
1715 dma_addr_t first_frag,
1716 u16 first_frag_len, void *cookie, u8 notify_fw)
1717{
1718 struct qed_ll2_tx_packet *p_curp = NULL;
1719 struct qed_ll2_info *p_ll2_conn = NULL;
Ram Amraniabd49672016-10-01 22:00:01 +03001720 enum core_roce_flavor_type roce_flavor;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001721 struct qed_ll2_tx_queue *p_tx;
1722 struct qed_chain *p_tx_chain;
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001723 enum core_tx_dest tx_dest;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001724 unsigned long flags;
1725 int rc = 0;
1726
1727 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1728 if (!p_ll2_conn)
1729 return -EINVAL;
1730 p_tx = &p_ll2_conn->tx_queue;
1731 p_tx_chain = &p_tx->txq_chain;
1732
1733 if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1734 return -EIO;
1735
1736 spin_lock_irqsave(&p_tx->lock, flags);
1737 if (p_tx->cur_send_packet) {
1738 rc = -EEXIST;
1739 goto out;
1740 }
1741
1742 /* Get entry, but only if we have tx elements for it */
1743 if (!list_empty(&p_tx->free_descq))
1744 p_curp = list_first_entry(&p_tx->free_descq,
1745 struct qed_ll2_tx_packet, list_entry);
1746 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1747 p_curp = NULL;
1748
1749 if (!p_curp) {
1750 rc = -EBUSY;
1751 goto out;
1752 }
1753
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001754 tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
1755 CORE_TX_DEST_LB;
Ram Amraniabd49672016-10-01 22:00:01 +03001756 if (qed_roce_flavor == QED_LL2_ROCE) {
1757 roce_flavor = CORE_ROCE;
1758 } else if (qed_roce_flavor == QED_LL2_RROCE) {
1759 roce_flavor = CORE_RROCE;
1760 } else {
1761 rc = -EINVAL;
1762 goto out;
1763 }
1764
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001765 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1766 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1767 num_of_bds, first_frag,
1768 first_frag_len, cookie, notify_fw);
1769 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001770 num_of_bds, tx_dest,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001771 vlan, bd_flags, l4_hdr_offset_w,
Ram Amraniabd49672016-10-01 22:00:01 +03001772 roce_flavor,
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001773 first_frag, first_frag_len);
1774
1775 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1776
1777out:
1778 spin_unlock_irqrestore(&p_tx->lock, flags);
1779 return rc;
1780}
1781
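/* Fill the next BD of the packet currently being composed on
 * @connection_handle with one more fragment. The BD itself was already
 * produced from the Tx chain by qed_ll2_prepare_tx_packet(); once the
 * last expected fragment is set, qed_ll2_tx_packet_notify() moves the
 * packet to the sending list and rings the doorbell if requested.
 */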
1782int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1783 u8 connection_handle,
1784 dma_addr_t addr, u16 nbytes)
1785{
1786 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1787 struct qed_ll2_info *p_ll2_conn = NULL;
1788 u16 cur_send_frag_num = 0;
1789 struct core_tx_bd *p_bd;
1790 unsigned long flags;
1791
1792 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1793 if (!p_ll2_conn)
1794 return -EINVAL;
1795
1796 if (!p_ll2_conn->tx_queue.cur_send_packet)
1797 return -EINVAL;
1798
1799 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1800 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1801
1802 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1803 return -EINVAL;
1804
1805 /* Fill the BD information, and possibly notify FW */
1806 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1807 DMA_REGPAIR_LE(p_bd->addr, addr);
1808 p_bd->nbytes = cpu_to_le16(nbytes);
1809 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1810 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1811
1812 p_ll2_conn->tx_queue.cur_send_frag_num++;
1813
1814 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1815 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1816 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1817
1818 return 0;
1819}
1820
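/* Stop the Tx and Rx queues of an established connection and flush any
 * buffers still posted on them. iSCSI-OOO isles are released, and for
 * FCoE the 0x8906/0x8914 (FCoE/FIP) ethertype filters are removed.
 */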
1821int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1822{
1823 struct qed_ll2_info *p_ll2_conn = NULL;
1824 int rc = -EINVAL;
1825
1826 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1827 if (!p_ll2_conn)
1828 return -EINVAL;
1829
1830 /* Stop Tx & Rx of connection, if needed */
1831 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1832 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1833 if (rc)
1834 return rc;
1835 qed_ll2_txq_flush(p_hwfn, connection_handle);
1836 }
1837
1838 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1839 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1840 if (rc)
1841 return rc;
1842 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1843 }
1844
Arnd Bergmann0629a332017-01-18 15:52:52 +01001845 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001846 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1847
Arun Easi1e128c82017-02-15 06:28:22 -08001848 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
1849 qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1850 0x8906, 0,
1851 QED_LLH_FILTER_ETHERTYPE);
1852 qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
1853 0x8914, 0,
1854 QED_LLH_FILTER_ETHERTYPE);
1855 }
1856
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001857 return rc;
1858}
1859
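/* Free everything acquired for a connection: unregister the Rx/Tx
 * status-block callbacks, release the descriptor arrays and chains,
 * return the CID and mark the handle as inactive again.
 */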
1860void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1861{
1862 struct qed_ll2_info *p_ll2_conn = NULL;
1863
1864 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1865 if (!p_ll2_conn)
1866 return;
1867
1868 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1869 p_ll2_conn->rx_queue.b_cb_registred = false;
1870 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1871 }
1872
1873 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1874 p_ll2_conn->tx_queue.b_cb_registred = false;
1875 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1876 }
1877
1878 kfree(p_ll2_conn->tx_queue.descq_array);
1879 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1880
1881 kfree(p_ll2_conn->rx_queue.descq_array);
1882 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1883 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1884
1885 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1886
Yuval Mintz1d6cff42016-12-01 00:21:07 -08001887 qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
1888
Yuval Mintz0a7fb112016-10-01 21:59:55 +03001889 mutex_lock(&p_ll2_conn->mutex);
1890 p_ll2_conn->b_active = false;
1891 mutex_unlock(&p_ll2_conn->mutex);
1892}
1893
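/* Allocate the per-hwfn array of LL2 connection objects; each entry keeps
 * its own index in my_id and is handed out later by the acquire flow.
 */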
1894struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1895{
1896 struct qed_ll2_info *p_ll2_connections;
1897 u8 i;
1898
1899 /* Allocate LL2's set struct */
1900 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1901 sizeof(struct qed_ll2_info), GFP_KERNEL);
1902 if (!p_ll2_connections) {
1903		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2_info'\n");
1904 return NULL;
1905 }
1906
1907 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1908 p_ll2_connections[i].my_id = i;
1909
1910 return p_ll2_connections;
1911}
1912
1913void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1914 struct qed_ll2_info *p_ll2_connections)
1915{
1916 int i;
1917
1918 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1919 mutex_init(&p_ll2_connections[i].mutex);
1920}
1921
1922void qed_ll2_free(struct qed_hwfn *p_hwfn,
1923 struct qed_ll2_info *p_ll2_connections)
1924{
1925 kfree(p_ll2_connections);
1926}
1927
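/* The helpers below read the per-queue Tstorm/Ustorm/Pstorm counters
 * directly from storm RAM through the acquired PTT window.
 */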
1928static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1929 struct qed_ptt *p_ptt,
1930 struct qed_ll2_info *p_ll2_conn,
1931 struct qed_ll2_stats *p_stats)
1932{
1933 struct core_ll2_tstorm_per_queue_stat tstats;
1934 u8 qid = p_ll2_conn->queue_id;
1935 u32 tstats_addr;
1936
1937 memset(&tstats, 0, sizeof(tstats));
1938 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1939 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1940 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1941
1942 p_stats->packet_too_big_discard =
1943 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1944 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1945}
1946
1947static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1948 struct qed_ptt *p_ptt,
1949 struct qed_ll2_info *p_ll2_conn,
1950 struct qed_ll2_stats *p_stats)
1951{
1952 struct core_ll2_ustorm_per_queue_stat ustats;
1953 u8 qid = p_ll2_conn->queue_id;
1954 u32 ustats_addr;
1955
1956 memset(&ustats, 0, sizeof(ustats));
1957 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1958 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1959 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1960
1961 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1962 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1963 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1964 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1965 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1966 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1967}
1968
1969static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1970 struct qed_ptt *p_ptt,
1971 struct qed_ll2_info *p_ll2_conn,
1972 struct qed_ll2_stats *p_stats)
1973{
1974 struct core_ll2_pstorm_per_queue_stat pstats;
1975 u8 stats_id = p_ll2_conn->tx_stats_id;
1976 u32 pstats_addr;
1977
1978 memset(&pstats, 0, sizeof(pstats));
1979 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1980 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1981 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1982
1983 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1984 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1985 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1986 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1987 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1988 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1989}
1990
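/* Gather Rx/Tx statistics of @connection_handle into @p_stats; Pstorm
 * (Tx) counters are read only when Tx statistics were enabled for the
 * connection.
 */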
1991int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1992 u8 connection_handle, struct qed_ll2_stats *p_stats)
1993{
1994 struct qed_ll2_info *p_ll2_conn = NULL;
1995 struct qed_ptt *p_ptt;
1996
1997 memset(p_stats, 0, sizeof(*p_stats));
1998
1999 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
2000 !p_hwfn->p_ll2_info)
2001 return -EINVAL;
2002
2003 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
2004
2005 p_ptt = qed_ptt_acquire(p_hwfn);
2006 if (!p_ptt) {
2007 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2008 return -EINVAL;
2009 }
2010
2011 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2012 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2013 if (p_ll2_conn->tx_stats_en)
2014 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
2015
2016 qed_ptt_release(p_hwfn, p_ptt);
2017 return 0;
2018}
2019
2020static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
2021 const struct qed_ll2_cb_ops *ops,
2022 void *cookie)
2023{
2024 cdev->ll2->cbs = ops;
2025 cdev->ll2->cb_cookie = cookie;
2026}
2027
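/* Back-end of the qed_ll2_ops .start callback: allocate and post
 * QED_LL2_RX_SIZE Rx buffers, acquire and establish a connection whose
 * type matches the PF personality (FCoE/iSCSI/RoCE), start the iSCSI OOO
 * queue when applicable and program an LLH MAC filter for the provided
 * ll2_mac_address.
 */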
2028static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2029{
Arnd Bergmann0629a332017-01-18 15:52:52 +01002030 struct qed_ll2_conn ll2_info;
Wei Yongjun88a24282016-10-10 14:08:28 +00002031 struct qed_ll2_buffer *buffer, *tmp_buffer;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002032 enum qed_ll2_conn_type conn_type;
2033 struct qed_ptt *p_ptt;
2034 int rc, i;
Yuval Mintzfc831822016-12-01 00:21:06 -08002035 u8 gsi_enable = 1;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002036
2037 /* Initialize LL2 locks & lists */
2038 INIT_LIST_HEAD(&cdev->ll2->list);
2039 spin_lock_init(&cdev->ll2->lock);
2040 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
2041 L1_CACHE_BYTES + params->mtu;
2042 cdev->ll2->frags_mapped = params->frags_mapped;
2043
2044	/* Allocate memory for LL2 */
2045 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
2046 cdev->ll2->rx_size);
2047 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
2048 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2049 if (!buffer) {
2050 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
2051 goto fail;
2052 }
2053
2054 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
2055 &buffer->phys_addr);
2056 if (rc) {
2057 kfree(buffer);
2058 goto fail;
2059 }
2060
2061 list_add_tail(&buffer->list, &cdev->ll2->list);
2062 }
2063
2064 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
Arun Easi1e128c82017-02-15 06:28:22 -08002065 case QED_PCI_FCOE:
2066 conn_type = QED_LL2_TYPE_FCOE;
2067 gsi_enable = 0;
2068 break;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002069 case QED_PCI_ISCSI:
2070 conn_type = QED_LL2_TYPE_ISCSI;
Yuval Mintzfc831822016-12-01 00:21:06 -08002071 gsi_enable = 0;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002072 break;
2073 case QED_PCI_ETH_ROCE:
2074 conn_type = QED_LL2_TYPE_ROCE;
2075 break;
2076 default:
2077 conn_type = QED_LL2_TYPE_TEST;
2078 }
2079
2080 /* Prepare the temporary ll2 information */
2081 memset(&ll2_info, 0, sizeof(ll2_info));
Arnd Bergmann0629a332017-01-18 15:52:52 +01002082
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002083 ll2_info.conn_type = conn_type;
2084 ll2_info.mtu = params->mtu;
2085 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
2086 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
2087 ll2_info.tx_tc = 0;
2088 ll2_info.tx_dest = CORE_TX_DEST_NW;
Yuval Mintzfc831822016-12-01 00:21:06 -08002089 ll2_info.gsi_enable = gsi_enable;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002090
2091 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
2092 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
2093 &cdev->ll2->handle);
2094 if (rc) {
2095 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
2096 goto fail;
2097 }
2098
2099 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2100 cdev->ll2->handle);
2101 if (rc) {
2102 DP_INFO(cdev, "Failed to establish LL2 connection\n");
2103 goto release_fail;
2104 }
2105
2106 /* Post all Rx buffers to FW */
2107 spin_lock_bh(&cdev->ll2->lock);
Wei Yongjun88a24282016-10-10 14:08:28 +00002108 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002109 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2110 cdev->ll2->handle,
2111 buffer->phys_addr, 0, buffer, 1);
2112 if (rc) {
2113 DP_INFO(cdev,
2114 "Failed to post an Rx buffer; Deleting it\n");
2115 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
2116 cdev->ll2->rx_size, DMA_FROM_DEVICE);
2117 kfree(buffer->data);
2118 list_del(&buffer->list);
2119 kfree(buffer);
2120 } else {
2121 cdev->ll2->rx_cnt++;
2122 }
2123 }
2124 spin_unlock_bh(&cdev->ll2->lock);
2125
2126 if (!cdev->ll2->rx_cnt) {
2127 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
2128 goto release_terminate;
2129 }
2130
2131 if (!is_valid_ether_addr(params->ll2_mac_address)) {
2132 DP_INFO(cdev, "Invalid Ethernet address\n");
2133 goto release_terminate;
2134 }
2135
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002136 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2137 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
2138 DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
2139 rc = qed_ll2_start_ooo(cdev, params);
2140 if (rc) {
2141 DP_INFO(cdev,
2142 "Failed to initialize the OOO LL2 queue\n");
2143 goto release_terminate;
2144 }
2145 }
2146
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002147 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2148 if (!p_ptt) {
2149 DP_INFO(cdev, "Failed to acquire PTT\n");
2150 goto release_terminate;
2151 }
2152
2153 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2154 params->ll2_mac_address);
2155 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2156 if (rc) {
2157 DP_ERR(cdev, "Failed to allocate LLH filter\n");
2158 goto release_terminate_all;
2159 }
2160
2161 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002162 return 0;
2163
2164release_terminate_all:
2165
2166release_terminate:
2167 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2168release_fail:
2169 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2170fail:
2171 qed_ll2_kill_buffers(cdev);
2172 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2173 return -EINVAL;
2174}
2175
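/* Reverse of qed_ll2_start(): remove the LLH MAC filter, stop the OOO
 * queue if it was started, terminate and release the connection and free
 * any Rx buffers that are still posted.
 */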
2176static int qed_ll2_stop(struct qed_dev *cdev)
2177{
2178 struct qed_ptt *p_ptt;
2179 int rc;
2180
2181 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
2182 return 0;
2183
2184 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2185 if (!p_ptt) {
2186 DP_INFO(cdev, "Failed to acquire PTT\n");
2187 goto fail;
2188 }
2189
2190 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2191 cdev->ll2_mac_address);
2192 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2193 eth_zero_addr(cdev->ll2_mac_address);
2194
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002195 if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
2196 cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
2197 qed_ll2_stop_ooo(cdev);
2198
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002199 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2200 cdev->ll2->handle);
2201 if (rc)
2202 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
2203
2204 qed_ll2_kill_buffers(cdev);
2205
2206 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
2207 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
2208
2209 return rc;
2210fail:
2211 return -EINVAL;
2212}
2213
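/* Transmit an skb over the LL2 connection owned by the protocol driver.
 * The linear part is posted as the first BD via
 * qed_ll2_prepare_tx_packet() and every page fragment is then added with
 * qed_ll2_set_fragment_of_tx_packet(). Checksum offload is not supported
 * here, so checksummed skbs are rejected up front.
 */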
2214static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
2215{
2216 const skb_frag_t *frag;
2217 int rc = -EINVAL, i;
2218 dma_addr_t mapping;
2219 u16 vlan = 0;
2220 u8 flags = 0;
2221
2222 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2223		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2224 return -EINVAL;
2225 }
2226
2227 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2228 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2229 1 + skb_shinfo(skb)->nr_frags);
2230 return -EINVAL;
2231 }
2232
2233 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
2234 skb->len, DMA_TO_DEVICE);
2235 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
2236 DP_NOTICE(cdev, "SKB mapping failed\n");
2237 return -EINVAL;
2238 }
2239
2240 /* Request HW to calculate IP csum */
2241 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
2242 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2243 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
2244
2245 if (skb_vlan_tag_present(skb)) {
2246 vlan = skb_vlan_tag_get(skb);
2247 flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
2248 }
2249
2250 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
2251 cdev->ll2->handle,
2252 1 + skb_shinfo(skb)->nr_frags,
Yuval Mintz1d6cff42016-12-01 00:21:07 -08002253 vlan, flags, 0, QED_LL2_TX_DEST_NW,
2254 0 /* RoCE FLAVOR */,
Ram Amraniabd49672016-10-01 22:00:01 +03002255 mapping, skb->len, skb, 1);
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002256 if (rc)
2257 goto err;
2258
2259 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2260 frag = &skb_shinfo(skb)->frags[i];
2261 if (!cdev->ll2->frags_mapped) {
2262 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2263 skb_frag_size(frag),
2264 DMA_TO_DEVICE);
2265
2266 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
2267 mapping))) {
2268 DP_NOTICE(cdev,
2269 "Unable to map frag - dropping packet\n");
Pan Bian0ff18d22016-12-04 13:53:53 +08002270 rc = -ENOMEM;
Yuval Mintz0a7fb112016-10-01 21:59:55 +03002271 goto err;
2272 }
2273 } else {
2274 mapping = page_to_phys(skb_frag_page(frag)) |
2275 frag->page_offset;
2276 }
2277
2278 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2279 cdev->ll2->handle,
2280 mapping,
2281 skb_frag_size(frag));
2282
2283		/* If this fails there is not much to do: a partial packet has been
2284		 * posted and its memory cannot be freed until the completion arrives.
2285		 */
2286 if (rc)
2287 goto err2;
2288 }
2289
2290 return 0;
2291
2292err:
2293 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
2294
2295err2:
2296 return rc;
2297}
2298
2299static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2300{
2301 if (!cdev->ll2)
2302 return -EINVAL;
2303
2304 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2305 cdev->ll2->handle, stats);
2306}
2307
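/* Ops exposed to upper-layer qed client drivers. A client is expected to
 * register its qed_ll2_cb_ops, .start the connection with its MAC address
 * and MTU, push frames through .start_xmit and finally .stop it; this is
 * a typical usage sequence rather than one enforced by the code here.
 */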
2308const struct qed_ll2_ops qed_ll2_ops_pass = {
2309 .start = &qed_ll2_start,
2310 .stop = &qed_ll2_stop,
2311 .start_xmit = &qed_ll2_start_xmit,
2312 .register_cb_ops = &qed_ll2_register_cb_ops,
2313 .get_stats = &qed_ll2_stats,
2314};
2315
2316int qed_ll2_alloc_if(struct qed_dev *cdev)
2317{
2318 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
2319 return cdev->ll2 ? 0 : -ENOMEM;
2320}
2321
2322void qed_ll2_dealloc_if(struct qed_dev *cdev)
2323{
2324 kfree(cdev->ll2);
2325 cdev->ll2 = NULL;
2326}