/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

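/* Rx completion handler for the protocol-driver connection: wraps the
 * completed buffer in an skb, hands it to the registered rx_cb, and
 * reposts a (new or reused) buffer to the FW.
 */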
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

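/* Drain the Tx active list on teardown; OOO buffers go back to the free
 * pool, anything else is completed through the GSI/regular Tx callbacks.
 */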
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}

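/* Tx SB callback: walk the BDs the FW consumer has moved past, consume
 * them from the chain and complete each packet, dropping the Tx lock
 * around the completion callbacks.
 */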
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

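/* Complete a single GSI Rx CQE: pop the matching descriptor, extract the
 * CQE fields and deliver them via qed_ll2b_complete_rx_gsi_packet() with
 * the Rx lock temporarily released.
 */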
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

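/* Rx SB callback: process the RCQ up to the FW consumer, dispatching each
 * CQE by type (slow path is unexpected here; GSI and regular completions
 * have dedicated handlers).
 */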
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

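/* Drain the Rx active list on teardown; for OOO connections the buffers
 * are returned to the free pool.
 */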
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}

#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);

	return bd_flags;
}

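/* Handle loopback (OOO) Rx CQEs: record the CQE in the OOO history, apply
 * any requested isle deletion, then add/join isles or queue ready buffers
 * according to the TCP OOO opcode carried in the opaque data.
 */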
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

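/* Re-transmit buffers that the OOO logic marked ready, each as a
 * single-BD packet toward the connection's Tx destination; a buffer that
 * fails to be queued is returned to the ready list and the loop stops.
 */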
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
					       p_buffer->vlan, bd_flags,
					       l4_hdr_offset_w,
					       p_ll2_conn->tx_dest, 0,
					       first_frag,
					       p_buffer->packet_length,
					       p_buffer, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

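/* For OOO connections, pre-allocate the DMA-coherent Rx buffers: each is
 * sized mtu + 26 bytes of header room plus cache-line padding, rounded up
 * to a multiple of ETH_CACHE_LINE_SIZE.
 */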
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_info *ll2_info;
	int rc;

	ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
	if (!ll2_info)
		return -ENOMEM;
	ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info->mtu = params->mtu;
	ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info->tx_tc = OOO_LB_TC;
	ll2_info->tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	kfree(ll2_info);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
				    { return -EINVAL; }
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */

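/* Post the CORE Rx-queue-start ramrod: points the FW at the BD chain and
 * CQE PBL, sets MTU/VLAN/TTL0 policy and the action on error; OOO queues
 * are not marked as the main function queue.
 */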
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params pq_params;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = p_ll2_conn->tx_tc;
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn_type, rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}

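/* Claim a free connection slot, copy the caller's parameters, allocate the
 * Rx/Tx chains (and OOO buffers when relevant) and register the matching
 * SB completion callbacks; the slot index becomes the connection handle.
 */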
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn_type = p_params->conn_type;
	p_ll2_info->mtu = p_params->mtu;
	p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
	p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
	p_ll2_info->tx_tc = p_params->tx_tc;
	p_ll2_info->tx_dest = p_params->tx_dest;
	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
	p_ll2_info->gsi_enable = p_params->gsi_enable;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

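/* Bring an acquired connection to life: reset the chains and descriptor
 * lists, acquire a CID, derive the producer/doorbell addresses and fire
 * the Rx/Tx queue-start ramrods.
 */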
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	return rc;
}

static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

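/* Post a single Rx buffer. With notify_fw the BD and CQE producers are
 * updated immediately (flushing anything pending on posting_descq);
 * otherwise the buffer is parked on posting_descq for a later batch.
 */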
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  u8 num_of_bds,
					  dma_addr_t first_frag,
					  u16 first_frag_len, void *p_cookie,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = p_cookie;
	p_curp->bd_used = num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
	p_tx->cur_send_frag_num++;
}

static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_info *p_ll2,
					     struct qed_ll2_tx_packet *p_curp,
					     u8 num_of_bds,
					     enum core_tx_dest tx_dest,
					     u16 vlan,
					     u8 bd_flags,
					     u16 l4_hdr_offset_w,
					     enum core_roce_flavor_type type,
					     dma_addr_t first_frag,
					     u16 first_frag_len)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	u16 frag_idx;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	start_bd->bd_flags.as_bitfield = bd_flags;
	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
	DMA_REGPAIR_LE(start_bd->addr, first_frag);
	start_bd->nbytes = cpu_to_le16(first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->conn_type,
		   prod_idx,
		   first_frag_len,
		   num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_flags.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		(*p_bd)->bitfield0 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
}

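/* Prepare (and possibly doorbell) a Tx packet. Only the first fragment is
 * set here; remaining fragments are added one by one through
 * qed_ll2_set_fragment_of_tx_packet(), and the doorbell rings once all
 * BDs of the packet have been filled.
 */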
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
			      u8 connection_handle,
			      u8 num_of_bds,
			      u16 vlan,
			      u8 bd_flags,
			      u16 l4_hdr_offset_w,
			      enum qed_ll2_tx_dest e_tx_dest,
			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
			      dma_addr_t first_frag,
			      u16 first_frag_len, void *cookie, u8 notify_fw)
{
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	enum core_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	enum core_tx_dest tx_dest;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
						    CORE_TX_DEST_LB;
	if (qed_roce_flavor == QED_LL2_ROCE) {
		roce_flavor = CORE_ROCE;
	} else if (qed_roce_flavor == QED_LL2_RROCE) {
		roce_flavor = CORE_RROCE;
	} else {
		rc = -EINVAL;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
				      num_of_bds, first_frag,
				      first_frag_len, cookie, notify_fw);
	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
					 num_of_bds, tx_dest,
					 vlan, bd_flags, l4_hdr_offset_w,
					 roce_flavor,
					 first_frag, first_frag_len);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

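/* Attach one more fragment to the TX packet currently being built.
 * Called once per remaining fragment after qed_ll2_prepare_tx_packet();
 * fills the next BD and lets qed_ll2_tx_packet_notify() decide whether
 * the packet is now complete enough to doorbell.
 */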
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

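/* Stop the Rx/Tx rings of an established connection and flush any
 * descriptors still in flight; for iSCSI OOO connections, all reassembly
 * isles are released as well. The connection object itself remains
 * allocated - qed_ll2_release_connection() frees its resources.
 */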
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	return rc;
}

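/* Undo qed_ll2_acquire_connection(): unregister the status-block
 * callbacks, free the descriptor arrays and chains, release the CID,
 * and mark the connection slot as inactive so it can be reused.
 */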
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

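/* Allocate the per-hwfn array of LL2 connection objects and assign each
 * entry its own index; returns NULL on allocation failure.
 */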
struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return NULL;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	return p_ll2_connections;
}

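/* Initialize the mutex of each connection element in the set */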
void qed_ll2_setup(struct qed_hwfn *p_hwfn,
		   struct qed_ll2_info *p_ll2_connections)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_ll2_connections[i].mutex);
}

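/* Release the connection set allocated by qed_ll2_alloc() */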
void qed_ll2_free(struct qed_hwfn *p_hwfn,
		  struct qed_ll2_info *p_ll2_connections)
{
	kfree(p_ll2_connections);
}

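/* The three helpers below read per-queue LL2 counters straight out of
 * the TSTORM/USTORM/PSTORM SDM RAM windows through the supplied PTT,
 * folding each 64-bit HI/LO register pair into the caller's
 * qed_ll2_stats structure.
 */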
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
			HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

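/* Collect the full statistics set for one connection. Tx (PSTORM)
 * counters are read only when Tx statistics were enabled at acquire
 * time.
 */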
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

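/* Cache the protocol driver's callbacks and cookie on the device */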
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

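/* qed_ll2_ops.start: allocate and post the Rx buffer pool, pick a
 * connection type from the PF personality, acquire and establish the
 * connection, optionally start the iSCSI OOO queue, and install an LLH
 * MAC filter for the requested address. Any failure unwinds the steps
 * already taken before returning -EINVAL.
 */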
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_info ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;
	u8 gsi_enable = 1;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		gsi_enable = 0;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));
	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = gsi_enable;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

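/* qed_ll2_ops.stop: remove the LLH MAC filter, stop the OOO queue if it
 * was started, terminate the connection, reclaim all Rx buffers and
 * release the connection slot.
 */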
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

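/* qed_ll2_ops.start_xmit: map an skb for DMA and feed it to the LL2 TX
 * path - the linear part goes through qed_ll2_prepare_tx_packet() and
 * each page fragment through qed_ll2_set_fragment_of_tx_packet().
 * Checksum offload is not supported on this path.
 */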
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
	}

	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
				       cdev->ll2->handle,
				       1 + skb_shinfo(skb)->nr_frags,
				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
				       0 /* RoCE FLAVOR */,
				       mapping, skb->len, skb, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		if (!cdev->ll2->frags_mapped) {
			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
						       mapping))) {
				DP_NOTICE(cdev,
					  "Unable to map frag - dropping packet\n");
				rc = -ENOMEM;
				/* The first BD was already posted to FW, so
				 * the skb->data mapping must remain live
				 * until completion; bail out without
				 * unmapping it (and without touching the
				 * failed frag mapping).
				 */
				goto err2;
			}
		} else {
			mapping = page_to_phys(skb_frag_page(frag)) |
				  frag->page_offset;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is not much we can do - part of the
		 * packet has already been posted, so we cannot free the
		 * memory and must instead wait for the completion.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}

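/* qed_ll2_ops.get_stats: thin wrapper around qed_ll2_get_stats() using
 * the device-level connection handle.
 */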
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

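/* LL2 operations exported to protocol drivers through the qed_ll2_if
 * interface.
 */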
const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

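/* Allocate/free the cdev-level LL2 bookkeeping structure */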
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}