/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for the buffer; reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If the buffer is to be reused or there's no replacement buffer,
	 * repost this one.
	 */
	if (rc)
		goto out_post;

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

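/* Translate a connection handle into its qed_ll2_info entry, validating the
 * handle range first. With b_only_active set, only connections currently
 * marked b_active are returned; b_lock additionally takes the per-connection
 * mutex around the b_active check.
 */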
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->conn.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}

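/* Tx interrupt handler: walks the gap between the firmware consumer index
 * and our local BD index, consuming completed BDs from the Tx chain and
 * invoking the per-packet completion callback (GSI or regular) with the
 * queue spinlock dropped around the callback.
 */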
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->conn.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

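/* Rx interrupt handler: consumes RCQ entries up to the firmware consumer
 * index and dispatches each CQE by type (GSI offload vs. regular); slowpath
 * CQEs are not expected on this queue.
 */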
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		/* Only after the consumer index is updated can we tell
		 * whether this CQE is the last one of the current batch.
		 */
		b_last_cqe = (cq_new_idx == cq_old_idx);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);

	return bd_flags;
}

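/* Loopback Rx handler for the iSCSI out-of-order connection. Each CQE
 * carries an ooo_opaque describing an "isle" operation; drops are processed
 * first, then the buffer is attached to a new/left/right isle, used to join
 * two isles, or queued for the peninsula (in-order) Tx path.
 */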
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
					       p_buffer->vlan, bd_flags,
					       l4_hdr_offset_w,
					       p_ll2_conn->conn.tx_dest, 0,
					       first_frag,
					       p_buffer->packet_length,
					       p_buffer, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs (%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

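/* Allocate the DMA-coherent buffer pool for the iSCSI OOO connection.
 * The extra room on top of the MTU (26 bytes plus a cache line) presumably
 * covers L2 headers and the firmware's placement offset; the total size is
 * rounded up to a cache-line multiple.
 */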
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_conn ll2_info = { 0 };
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = OOO_LB_TC;
	ll2_info.tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
				    { return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */

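/* Build and post the CORE_RAMROD_RX_QUEUE_START ramrod on the slowpath
 * queue, describing the Rx BD chain and the CQE PBL to firmware. EBLOCK
 * completion mode means the caller blocks until firmware acks the ramrod.
 */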
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params pq_params;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = p_ll2_conn->conn.tx_tc;
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}

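/* A minimal caller flow for the LL2 API exported below (a sketch only; the
 * real users live elsewhere, e.g. the RoCE and storage code, and their exact
 * parameters differ):
 *
 *	struct qed_ll2_conn conn = { .conn_type = QED_LL2_TYPE_ROCE };
 *	u8 handle;
 *
 *	rc = qed_ll2_acquire_connection(p_hwfn, &conn, QED_LL2_RX_SIZE,
 *					QED_LL2_TX_SIZE, &handle);
 *	if (!rc)
 *		rc = qed_ll2_establish_connection(p_hwfn, handle);
 *	if (!rc)
 *		rc = qed_ll2_post_rx_buffer(p_hwfn, handle, phys_addr,
 *					    buf_len, cookie, 1);
 *	...
 *	qed_ll2_terminate_connection(p_hwfn, handle);
 *	qed_ll2_release_connection(p_hwfn, handle);
 */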
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn = *p_params;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->conn.ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF,
		  p_ll2_conn->conn.ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	return rc;
}

static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

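/* Post a single Rx buffer on a connection. The buffer is bound to a free
 * descriptor and a BD/CQE pair; with notify_fw clear it is only queued on
 * posting_descq, and the firmware producer is updated on a later call.
 */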
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  u8 num_of_bds,
					  dma_addr_t first_frag,
					  u16 first_frag_len, void *p_cookie,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = p_cookie;
	p_curp->bd_used = num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
	p_tx->cur_send_frag_num++;
}

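/* Fill the first (start) BD of a packet with its flags, destination, VLAN
 * and RoCE flavor, and reserve zeroed BDs for any remaining fragments; the
 * fragment BDs are filled later via qed_ll2_set_fragment_of_tx_packet().
 */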
static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_info *p_ll2,
					     struct qed_ll2_tx_packet *p_curp,
					     u8 num_of_bds,
					     enum core_tx_dest tx_dest,
					     u16 vlan,
					     u8 bd_flags,
					     u16 l4_hdr_offset_w,
					     enum core_roce_flavor_type type,
					     dma_addr_t first_frag,
					     u16 first_frag_len)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	u16 frag_idx;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	start_bd->bd_flags.as_bitfield = bd_flags;
	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
	DMA_REGPAIR_LE(start_bd->addr, first_frag);
	start_bd->nbytes = cpu_to_le16(first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->conn.conn_type,
		   prod_idx,
		   first_frag_len,
		   num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_flags.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		(*p_bd)->bitfield0 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}

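/* A sketch of a multi-fragment transmit using the two-step API below
 * (frag0/frag1 mappings are hypothetical; callers supply their own cookies):
 *
 *	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 2, vlan, bd_flags,
 *				       0, QED_LL2_TX_DEST_NW, QED_LL2_ROCE,
 *				       frag0_phys, frag0_len, cookie, 1);
 *	if (!rc)
 *		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						       frag1_phys, frag1_len);
 *
 * The doorbell is only rung once all num_of_bds fragments are in place.
 */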
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
			      u8 connection_handle,
			      u8 num_of_bds,
			      u16 vlan,
			      u8 bd_flags,
			      u16 l4_hdr_offset_w,
			      enum qed_ll2_tx_dest e_tx_dest,
			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
			      dma_addr_t first_frag,
			      u16 first_frag_len, void *cookie, u8 notify_fw)
{
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	enum core_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	enum core_tx_dest tx_dest;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
						    CORE_TX_DEST_LB;
	if (qed_roce_flavor == QED_LL2_ROCE) {
		roce_flavor = CORE_ROCE;
	} else if (qed_roce_flavor == QED_LL2_RROCE) {
		roce_flavor = CORE_RROCE;
	} else {
		rc = -EINVAL;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
				      num_of_bds, first_frag,
				      first_frag_len, cookie, notify_fw);
	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
					 num_of_bds, tx_dest,
					 vlan, bd_flags, l4_hdr_offset_w,
					 roce_flavor,
					 first_frag, first_frag_len);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

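/* Fill the next BD of the TX packet currently under construction with a
 * fragment's address and length. Once all bd_used fragments have been
 * set, qed_ll2_tx_packet_notify() pushes the packet and rings the
 * doorbell.
 */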
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

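/* Stop the Tx/Rx queues of an established connection and flush any
 * pending descriptors. The connection itself stays acquired until
 * qed_ll2_release_connection() is called.
 */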
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	return rc;
}

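/* Undo qed_ll2_acquire_connection() - unregister the status-block
 * callbacks, free the descriptor arrays and chains, and release the CID
 * so the handle can be reused.
 */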
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

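/* Allocate and index the per-hwfn array of LL2 connection objects. */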
struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return NULL;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	return p_ll2_connections;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn,
		   struct qed_ll2_info *p_ll2_connections)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_ll2_connections[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn,
		  struct qed_ll2_info *p_ll2_connections)
{
	kfree(p_ll2_connections);
}

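/* Statistics helpers - each one copies a single storm's per-queue
 * counters (Tstorm/Ustorm/Pstorm SDM RAM) out of BAR0 through the
 * supplied PTT window and folds the HI/LO register pairs into 64-bit
 * values.
 */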
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

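/* Collect the RX and TX statistics of a connection. Pstorm (TX)
 * counters are read only when TX statistics were enabled for the
 * connection.
 */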
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

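/* qed_ll2_ops entry point - bring up the common LL2 instance used by
 * the protocol drivers: allocate and post Rx buffers, acquire and
 * establish a connection matching the PF personality, start the iSCSI
 * OOO queue when needed, and install the LLH MAC filter.
 */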
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_conn ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;
	u8 gsi_enable = 1;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		gsi_enable = 0;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));

	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = gsi_enable;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

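/* qed_ll2_ops entry point - tear down whatever qed_ll2_start() set up,
 * in reverse order: MAC filter, OOO queue, connection and buffers.
 */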
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

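/* qed_ll2_ops entry point - transmit an skb over the LL2 connection by
 * mapping its linear part (and, unless the caller pre-mapped them, each
 * fragment), then feeding them to the prepare/set-fragment API above.
 */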
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
	}

	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
				       cdev->ll2->handle,
				       1 + skb_shinfo(skb)->nr_frags,
				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
				       0 /* RoCE FLAVOR */,
				       mapping, skb->len, skb, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		if (!cdev->ll2->frags_mapped) {
			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
						       mapping))) {
				DP_NOTICE(cdev,
					  "Unable to map frag - dropping packet\n");
				rc = -ENOMEM;
				goto err;
			}
		} else {
			mapping = page_to_phys(skb_frag_page(frag)) |
				  frag->page_offset;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is little we can do: part of the packet
		 * has already been posted, so we cannot free the memory and
		 * must wait for the completion instead.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}