/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"

/* #define CAVIUM_ONLY_PERF_MODE */

#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 * function for a given opcode/subcode.
 * @param octeon_dev - the octeon device pointer.
 * @param opcode - the opcode for which the dispatch argument
 *                 is to be checked.
 * @param subcode - the subcode for which the dispatch argument
 *                  is to be checked.
 * @return Success: void * (argument to the dispatch function)
 * @return Failure: NULL
 */
static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
					    u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}

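/** Read the count of packets the hardware has posted to this DROQ and fold
 * it into the driver's pending count. The same value is written back to the
 * register so those packets are not counted again on the next read.
 */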
u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
				  struct octeon_droq *droq)
{
	u32 pkt_count = 0;

	pkt_count = readl(droq->pkts_sent_reg);
	if (pkt_count) {
		atomic_add(pkt_count, &droq->pkts_pending);
		writel(pkt_count, droq->pkts_sent_reg);
	}

	return pkt_count;
}

static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}

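/* Reset the DROQ read/write/refill indices and the pending-packet count to
 * their initial (empty ring) state.
 */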
static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}

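/* Unmap and free every receive buffer still attached to the ring, then reset
 * the ring indices. Called when a DROQ is being torn down.
 */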
static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;

	for (i = 0; i < droq->max_count; i++) {
		if (droq->recv_buf_list[i].buffer) {
			if (droq->desc_ring) {
				lio_unmap_ring_info(oct->pci_dev,
						    (u64)droq->desc_ring[i].info_ptr,
						    OCT_DROQ_INFO_SIZE);
				lio_unmap_ring(oct->pci_dev,
					       (u64)droq->desc_ring[i].buffer_ptr,
					       droq->buffer_size);
			}
			recv_buffer_free(droq->recv_buf_list[i].buffer);
			droq->recv_buf_list[i].buffer = NULL;
		}
	}

	octeon_droq_reset_indices(droq);
}

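/* Allocate a receive buffer for every descriptor in the ring, DMA-map it and
 * record the mapping in the descriptor. Returns 0 on success or -ENOMEM if a
 * buffer allocation fails.
 */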
static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);

		droq->info_list[i].length = 0;

		/* map ring buffers into memory */
		desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
		desc_ring[i].buffer_ptr =
			lio_map_ring(oct->pci_dev,
				     droq->recv_buf_list[i].buffer,
				     droq->buffer_size);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}

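/* Free all resources held by output queue q_no: ring buffers, the info list,
 * the descriptor ring itself, and finally clear the DROQ structure.
 */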
int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);

	if (droq->recv_buf_list)
		vfree(droq->recv_buf_list);

	if (droq->info_base_addr)
		cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
				       droq->info_alloc_size,
				       droq->info_base_addr,
				       droq->info_list_dma);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);

	return 0;
}

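/* Initialize output queue q_no: allocate the descriptor ring, the
 * per-descriptor info list and the receive buffer list, populate the ring
 * with buffers and program the queue registers. Returns 0 on success, 1 on
 * failure (any partially allocated resources are released).
 */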
int octeon_init_droq(struct octeon_device *oct,
		     u32 q_no,
		     u32 num_descs,
		     u32 desc_size,
		     void *app_ctx)
{
	struct octeon_droq *droq;
	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	droq = oct->droq[q_no];
	memset(droq, 0, OCT_DROQ_SIZE);

	droq->oct_dev = oct;
	droq->q_no = q_no;
	if (app_ctx)
		droq->app_ctx = app_ctx;
	else
		droq->app_ctx = (void *)(size_t)q_no;

	c_num_descs = num_descs;
	c_buf_size = desc_size;
	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
	}

	droq->max_count = c_num_descs;
	droq->buffer_size = c_buf_size;

	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
					(dma_addr_t *)&droq->desc_ring_dma);

	if (!droq->desc_ring) {
		dev_err(&oct->pci_dev->dev,
			"Output queue %d ring alloc failed\n", q_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		q_no, droq->desc_ring, droq->desc_ring_dma);
	dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
		droq->max_count);

	droq->info_list =
		cnnic_alloc_aligned_dma(oct->pci_dev,
					(droq->max_count * OCT_DROQ_INFO_SIZE),
					&droq->info_alloc_size,
					&droq->info_base_addr,
					&droq->info_list_dma);

	if (!droq->info_list) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);
		return 1;
	}

	droq->recv_buf_list = (struct octeon_recv_buffer *)
			      vmalloc(droq->max_count *
				      OCT_DROQ_RECVBUF_SIZE);
	if (!droq->recv_buf_list) {
		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
		goto init_droq_fail;
	}

	if (octeon_droq_setup_ring_buffers(oct, droq))
		goto init_droq_fail;

	droq->pkts_per_intr = c_pkts_per_intr;
	droq->refill_threshold = c_refill_threshold;

	dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
		droq->max_empty_descs);

	spin_lock_init(&droq->lock);

	INIT_LIST_HEAD(&droq->dispatch_list);

	/* For 56xx Pass1, this function won't be called, so no checks. */
	oct->fn_list.setup_oq_regs(oct, q_no);

	oct->io_qmask.oq |= (1 << q_no);

	return 0;

init_droq_fail:
	octeon_delete_droq(oct, q_no);
	return 1;
}

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at an 8B offset from recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called in interrupt context.
 * Returns:
 *  Success: Pointer to recv_info_t
 *  Failure: NULL.
 * Locks:
 *  The droq->lock is held when this routine is called.
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;

	info = &droq->info_list[idx];

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)droq->desc_ring[idx].buffer_ptr,
			       droq->buffer_size);

		recv_pkt->buffer_size[i] =
			(bytes_left >= droq->buffer_size) ?
			droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		INCR_INDEX_BY1(idx, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;

	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				INCR_INDEX_BY1(droq->refill_idx,
					       droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		INCR_INDEX_BY1(refill_index, droq->max_count);
	} /* while */
	return desc_refilled;
}

/* octeon_droq_refill
 * Parameters:
 *  droq - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  up buffers (that were not dispatched) to form a contiguous ring.
 * Returns:
 *  No. of descriptors refilled.
 * Locks:
 *  This routine is called with droq->lock held.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			buf = recv_buffer_alloc(octeon_dev, droq->q_no,
						droq->buffer_size);
			/* If a buffer could not be allocated, no point in
			 * continuing.
			 */
			if (!buf)
				break;
			droq->recv_buf_list[droq->refill_idx].buffer = buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list[droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(octeon_dev->pci_dev,
				     droq->recv_buf_list[droq->refill_idx].buffer,
				     droq->buffer_size);

		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* If droq->refill_count is still non-zero, that is expected: the
	 * pull-up pass does not change the refill count. It only moves
	 * buffers to close the gap in the ring; the same number of buffers
	 * still needs to be refilled.
	 */
	return desc_refilled;
}

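/* Return the number of ring buffers of size buf_size needed to hold
 * total_len bytes (i.e. total_len rounded up to whole buffers).
 */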
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	u32 buf_cnt = 0;

	while (total_len > (buf_size * buf_cnt))
		buf_cnt++;
	return buf_cnt;
}

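/* Hand a slow-path packet to the dispatch function registered for its
 * opcode/subcode. A recv_info describing the packet's buffers is queued on
 * droq->dispatch_list; the dispatch callback itself runs later, outside the
 * droq lock. Returns the number of ring buffers the packet consumed.
 */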
static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
		droq->stats.dropped_nodispatch++;
	} /* else (dispatch_fn ... */

	return cnt;
}

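/* Drop cnt packets from the ring: account their lengths in the stats,
 * advance the read index past their buffers and mark those descriptors for
 * refill.
 */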
static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length -= OCT_RH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
		droq->refill_count += buf_cnt;
	}
}

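/* Process up to pkts_to_process packets from the ring. Fast-path (NIC)
 * packets are passed to the droq->ops.fptr receive handler; slow-path
 * packets are queued for dispatch. Descriptors are refilled whenever the
 * refill threshold is crossed. Returns the number of packets consumed; if
 * drop_on_max is set, any remainder is dropped and the full count is
 * returned. Called with droq->lock held.
 */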
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	struct octeon_droq_info *info;
	union octeon_rh *rh;
	u32 pkt, total_len = 0, pkt_count;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;

		info = &droq->info_list[droq->read_idx];
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		info->length -= OCT_RH_SIZE;
		rh = &info->rh;

		total_len += (u32)info->length;

		if (OPCODE_SLOW_PATH(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				lio_unmap_ring(oct->pci_dev,
					       (u64)droq->desc_ring[droq->read_idx].buffer_ptr,
					       droq->buffer_size);
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
				droq->recv_buf_list[droq->read_idx].buffer = NULL;
				INCR_INDEX_BY1(droq->read_idx, droq->max_count);
				skb_put(nicbuf, pkt_len);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc(oct, droq,
								  droq->q_no,
								  (u32)info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len;

					cpy_len = ((pkt_len + droq->buffer_size) >
						   info->length) ?
						  ((u32)info->length - pkt_len) :
						  droq->buffer_size;

					if (nicbuf) {
						lio_unmap_ring(oct->pci_dev,
							       (u64)droq->desc_ring[droq->read_idx].buffer_ptr,
							       droq->buffer_size);
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									droq->read_idx);
					}

					pkt_len += cpy_len;
					INCR_INDEX_BY1(droq->read_idx,
						       droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr)
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi);
				else
					recv_buffer_free(nicbuf);
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			/* Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			writel((desc_refilled), droq->pkts_credit_reg);
			/* make sure mmio write completes */
			mmiowb();
		}

	} /* for (each packet)... */

	/* Update stats with the packets and bytes consumed from this queue. */
	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		return pkts_to_process;
	}

	return pkt;
}

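/* Process up to budget pending packets on this DROQ (NAPI/tasklet path),
 * then invoke any dispatch callbacks that were queued while the lock was
 * held. Returns 1 if packets are still pending, 0 otherwise.
 */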
int
octeon_droq_process_packets(struct octeon_device *oct,
			    struct octeon_droq *droq,
			    u32 budget)
{
	u32 pkt_count = 0, pkts_processed = 0;
	struct list_head *tmp, *tmp2;

	pkt_count = atomic_read(&droq->pkts_pending);
	if (!pkt_count)
		return 0;

	if (pkt_count > budget)
		pkt_count = budget;

	/* Grab the lock */
	spin_lock(&droq->lock);

	pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);

	atomic_sub(pkts_processed, &droq->pkts_pending);

	/* Release the spin lock */
	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	/* If there are packets still pending, schedule the tasklet again */
	if (atomic_read(&droq->pkts_pending))
		return 1;

	return 0;
}

/**
 * Utility function to poll for packets. check_hw_for_packets must be
 * called before calling this routine.
 */
static int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		pkts_available =
			CVM_MIN((budget - total_pkts_processed),
				(u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);

		total_pkts_processed += pkts_processed;

		octeon_droq_check_hw_for_pkts(oct, droq);
	}

	spin_unlock(&droq->lock);

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}

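/* Entry point for poll-mode commands on queue q_no:
 *  POLL_EVENT_PROCESS_PKTS - process up to arg packets in poll mode.
 *  POLL_EVENT_PENDING_PKTS - process whatever is currently pending.
 *  POLL_EVENT_ENABLE_INTR  - re-enable packet time/count interrupts.
 * Returns a negative errno on bad arguments or an unknown command.
 */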
int
octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
			     u32 arg)
{
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (cmd == POLL_EVENT_PROCESS_PKTS)
		return octeon_droq_process_poll_pkts(oct, droq, arg);

	if (cmd == POLL_EVENT_PENDING_PKTS) {
		u32 pkt_cnt = atomic_read(&droq->pkts_pending);

		return octeon_droq_process_packets(oct, droq, pkt_cnt);
	}

	if (cmd == POLL_EVENT_ENABLE_INTR) {
		u32 value;
		unsigned long flags;

		/* Enable Pkt Interrupt */
		switch (oct->chip_id) {
		case OCTEON_CN66XX:
		case OCTEON_CN68XX: {
			struct octeon_cn6xxx *cn6xxx =
				(struct octeon_cn6xxx *)oct->chip;
			spin_lock_irqsave
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			value =
				octeon_read_csr(oct,
						CN6XXX_SLI_PKT_TIME_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_TIME_INT_ENB,
					 value);
			value =
				octeon_read_csr(oct,
						CN6XXX_SLI_PKT_CNT_INT_ENB);
			value |= (1 << q_no);
			octeon_write_csr(oct,
					 CN6XXX_SLI_PKT_CNT_INT_ENB,
					 value);

			/* don't bother flushing the enables */

			spin_unlock_irqrestore
				(&cn6xxx->lock_for_droq_int_enb_reg, flags);
			return 0;
		}
		break;
		}

		return 0;
	}

	dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
	return -EINVAL;
}

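/* Register the receive callback and related options (struct octeon_droq_ops)
 * for output queue q_no. The ops are copied into the DROQ under its lock.
 */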
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_droq *droq;
	unsigned long flags;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!ops) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	spin_lock_irqsave(&droq->lock, flags);

	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

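/* Clear the receive callback and drop_on_max setting previously registered
 * for output queue q_no.
 */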
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	unsigned long flags;
	struct octeon_droq *droq;
	struct octeon_config *oct_cfg = NULL;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	spin_lock_irqsave(&droq->lock, flags);

	droq->ops.fptr = NULL;
	droq->ops.drop_on_max = 0;

	spin_unlock_irqrestore(&droq->lock, flags);

	return 0;
}

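/* Allocate and initialize output queue q_no with num_descs descriptors of
 * desc_size-byte buffers. Returns 0 on success, 1 if the queue already
 * exists, and a negative value on allocation failure.
 */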
int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc(sizeof(*droq));
	if (!droq) {
		/* Nothing to clean up yet: the queue has not been attached
		 * to oct->droq[], so calling octeon_delete_droq() here would
		 * dereference a NULL pointer.
		 */
		return -ENOMEM;
	}
	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings */

	/* As of now not required, as settings are done for all 32 Droqs at
	 * the same time.
	 */
	return 0;
}