/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT	32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100

/* Portal register assists */

/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940

/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives simply degrades performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler treats
 * the portal registers as volatile.
 */

/* Cache-enabled ring access */
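/*
 * Ring entries (EQCR, DQRR, MR) are 64 bytes each, i.e. one cacheline (see
 * the *_SHIFT definitions below), so (idx << 6) converts a ring index into
 * a byte offset from the ring base.
 */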
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))

/*
 * Portal modes.
 * Enum types:
 *   pmode == production mode
 *   cmode == consumption mode
 *   dmode == h/w dequeue mode
 * Enum values use 3 letter codes. The first letter matches the portal mode,
 * the remaining two letters indicate:
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 *   dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 * As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define QM_EQCR_SIZE		8
#define QM_DQRR_SIZE		16
#define QM_MR_SIZE		8

/* "Enqueue Command" */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	u16 seqnum;
	u8 __reserved[4];
	u32 fqid;	/* 24-bit */
	u32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};

/* MC (Management Command) command */
/* "FQ" command layout */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}

/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
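/*
 * The rings are size-aligned in the portal's cache-enabled region, so
 * incrementing a cursor just past the last entry sets only the "carry" bit
 * (EQCR_CARRY); masking that bit off wraps the pointer back to the first
 * entry.
 */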
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |	/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(eqcr->cursor->fqid & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}

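/*
 * PVB commit: the entry body must be visible to QMan before the verb
 * (carrying the valid bit) is written, hence the dma_wmb() below; writing
 * the verb is what hands the entry over to hardware.
 */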
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}

/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) |	/* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

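/*
 * Valid-bit production mode: QMan toggles the VB bit in an entry's verb on
 * each lap of the ring, so a new entry is present when the entry's VB
 * matches the vbit we expect. Software flips its expectation whenever the
 * producer index wraps back to 0.
 */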
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
}

/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}

/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
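
/*
 * Typical management-command usage, as in qman_init_fq() and friends
 * below: start a command, fill it in, commit it with a verb, then poll
 * for the result:
 *
 *	mcc = qm_mc_start(&p->p);
 *	qm_fqid_set(&mcc->fq, fq->fqid);
 *	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		return -ETIMEDOUT;
 */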

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static struct workqueue_struct *qm_portal_wq;

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}

/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

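/*
 * The lookup table holds two entries per FQID: the full-service FQ object
 * at index fqid * 2, and an enqueue-only (QMAN_FQ_FLAG_NO_MODIFY)
 * reference at index fqid * 2 + 1 (see qman_create_fq() below).
 */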
int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

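/*
 * On 64-bit builds a pointer doesn't fit in the 32-bit tag (context_b), so
 * the tag is the fq_table index; on 32-bit the FQ pointer itself is stored
 * in the tag.
 */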
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;

	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}

static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		u64 now, then = jiffies;

		do {
			now = jiffies;
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}

static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
}

static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(msg->fq.context_b);
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(msg->ern.tag);
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	preempt_enable();
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}

/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}

/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe because:
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
			fq = tag_to_fq(dq->context_b);
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry or advancing.
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}

void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);

void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits that
	 * are in p->irq_sources. As we're trimming that mask, if one of them
	 * were to assert in the status register just before we remove it from
	 * the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other bits
	 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	clear_bits(bits, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);

const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);

u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);

struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);

int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);

void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);

/* Frame queue API */

static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}

int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;

	/* A context_b of 0 is allegedly special, so don't use that fqid */
	if (fqid == 0 || fqid >= num_fqids) {
		WARN(1, "bad fqid %d\n", fqid);
		return -EINVAL;
	}

	fq->idx = fqid * 2;
	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
		fq->idx++;

	WARN_ON(fq_table[fq->idx]);
	fq_table[fq->idx] = fq;

	return 0;
}
EXPORT_SYMBOL(qman_create_fq);
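
/*
 * A minimal usage sketch (hypothetical caller; "my_dqrr_cb" is illustrative
 * and not part of this driver): create an FQ with a dynamically allocated
 * FQID, then initialise and schedule it via qman_init_fq() below:
 *
 *	struct qman_fq fq = { .cb.dqrr = my_dqrr_cb };
 *
 *	if (!qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq))
 *		qman_init_fq(&fq, QMAN_INITFQ_FLAG_SCHED, NULL);
 */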
1694
1695void qman_destroy_fq(struct qman_fq *fq)
1696{
1697 /*
1698 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1699 * quiesced. Instead, run some checks.
1700 */
1701 switch (fq->state) {
1702 case qman_fq_state_parked:
1703 case qman_fq_state_oos:
1704 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1705 qman_release_fqid(fq->fqid);
1706
1707 DPAA_ASSERT(fq_table[fq->idx]);
1708 fq_table[fq->idx] = NULL;
1709 return;
1710 default:
1711 break;
1712 }
1713 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1714}
1715EXPORT_SYMBOL(qman_destroy_fq);
1716
1717u32 qman_fq_fqid(struct qman_fq *fq)
1718{
1719 return fq->fqid;
1720}
1721EXPORT_SYMBOL(qman_fq_fqid);

int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
		/* OAC can't be set at the same time as TDTHRESH */
		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as
	 * a demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
		mcc->initfq.fqd.context_b = fq_to_tag(fq);
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	if (opts) {
		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (opts->we_mask & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
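
/*
 * Usage sketch (hypothetical): one-shot initialise-and-schedule of the FQ
 * created above, directing it at a caller-chosen channel/work queue.
 * "my_channel" is an assumption; a real caller would use a channel it owns.
 *
 *	struct qm_mcc_initfq opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.we_mask = QM_INITFQ_WE_DESTWQ;
 *	qm_fqd_set_destwq(&opts.fqd, my_channel, 3);
 *	err = qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 *
 * Without QMAN_INITFQ_FLAG_SCHED the FQ is left parked and a later
 * qman_schedule_fq() is needed before it will be dequeued from.
 */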

int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue an ALTERFQ_SCHED management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
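
/*
 * Usage sketch (hypothetical): the two-step variant of bring-up, where the
 * FQ is initialised parked and only made dequeueable later.
 *
 *	err = qman_init_fq(&my_fq, 0, &opts);	(FQ is now parked)
 *	...
 *	err = qman_schedule_fq(&my_fq);		(parked -> scheduled)
 *
 * -EINVAL/-EBUSY indicate the FQ was not (or was ceasing to be) parked.
 * There is no alter command back to parked; returning there means retiring
 * and re-initialising the FQ.
 */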

int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = fq_to_tag(fq);
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
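
/*
 * Teardown sketch (hypothetical): retirement may complete immediately
 * (return 0) or asynchronously (return 1, QMAN_FQ_STATE_CHANGING set until
 * the FQRN message is processed), so a caller usually waits for the retired
 * state before taking the FQ out of service.
 *
 *	err = qman_retire_fq(&my_fq, NULL);
 *	if (err < 0)
 *		goto fail;
 *	while (my_fq.state != qman_fq_state_retired)
 *		msleep(1);		(wait for the FQRN to be handled)
 *	err = qman_oos_fq(&my_fq);
 *
 * A production caller would bound the wait, and may also need to drain a
 * non-empty FQ first (see qman_volatile_dequeue() below).
 */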

int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

static int qman_query_fq_np(struct qman_fq *fq,
			    struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}

int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{
	struct qm_mcr_querycgr query_cgr;
	int err;

	err = qman_query_cgr(cgr, &query_cgr);
	if (err)
		return err;

	*result = !!query_cgr.cgr.cs;
	return 0;
}
EXPORT_SYMBOL(qman_query_cgr_congested);
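
/*
 * Usage sketch (hypothetical): a transmit path can test the congestion
 * state of its CGR before enqueueing; "my_cgr" is the group created as
 * sketched after qman_create_cgr() below.
 *
 *	bool congested = false;
 *
 *	err = qman_query_cgr_congested(&my_cgr, &congested);
 *	if (!err && congested)
 *		return -ENOSPC;
 *
 * Note this issues a management command, which is too heavy for a true
 * fast path; those usually cache the state from the CSCN callback instead.
 */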

/* internal function used as a wait_event() expression */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}

static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}

static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}

int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
					!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				   !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
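
/*
 * Usage sketch (hypothetical): draining a retired FQ, three frames per
 * volatile command, blocking until the command is both issued and finished.
 * The dequeued frames arrive through the FQ's normal DQRR callback.
 *
 *	err = qman_volatile_dequeue(&my_fq,
 *				    QMAN_VOLATILE_FLAG_WAIT |
 *				    QMAN_VOLATILE_FLAG_FINISH,
 *				    QM_VDQCR_NUMFRAMES_SET(3));
 *
 * The vdqcr argument must not carry a FQID (the FQ's own FQID is inserted
 * here), only qualifiers such as the NUMFRAMES setting above.
 */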

static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;
	int ret = 0;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq)) {
		/* EQCR is full; tell the caller rather than drop silently */
		ret = -EBUSY;
		goto out;
	}

	qm_fqid_set(eq, fq->fqid);
	eq->tag = fq_to_tag(fq);
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_enqueue);
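
/*
 * Usage sketch (hypothetical): enqueue with a bounded retry on ring-full,
 * as a transmit path might do. The frame-descriptor setup is abbreviated;
 * buf_dma_addr and frame_len are assumptions.
 *
 *	struct qm_fd fd;
 *	int i;
 *
 *	memset(&fd, 0, sizeof(fd));
 *	qm_fd_addr_set64(&fd, buf_dma_addr);
 *	qm_fd_set_contig(&fd, 0, frame_len);
 *
 *	for (i = 0; i < 100; i++) {
 *		err = qman_enqueue(&my_fq, &fd);
 *		if (err != -EBUSY)
 *			break;
 *		cpu_relax();
 *	}
 */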

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
			local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
		else
			/* Overwrite TARG */
			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
						   TARG_MASK(p);
		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
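
/*
 * Usage sketch (hypothetical): create a CGR with congestion state change
 * notifications enabled and a byte-count threshold. The callback body and
 * the 1 MiB threshold are illustrative assumptions.
 *
 *	static void my_cscn_cb(struct qman_portal *portal,
 *			       struct qman_cgr *c, int congested)
 *	{
 *		cache_congestion_state(congested);	(assumed helper)
 *	}
 *
 *	struct qm_mcc_initcgr initcgr;
 *	static struct qman_cgr my_cgr = { .cb = my_cscn_cb };
 *
 *	err = qman_alloc_cgrid_range(&my_cgr.cgrid, 1);
 *	memset(&initcgr, 0, sizeof(initcgr));
 *	initcgr.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
 *	initcgr.cgr.cscn_en = QM_CGR_EN;
 *	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, 1 << 20, 1);
 *	err = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &initcgr);
 */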

int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal\n");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
					   ~(TARG_MASK(p));
	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		/*
		 * The deletion must run on the CPU the CGR was created on.
		 * We can't create and wait on a kthread here, since that
		 * sleeps and we hold preemption off; run the deletion via a
		 * synchronous cross-CPU call instead.
		 */
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
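
/*
 * Teardown-ordering sketch (hypothetical): a CGR must be deleted before its
 * CGRID is returned to the allocator, and any FQs enrolled in the group
 * should already be out of service (cgr_cleanup() below checks for this).
 *
 *	qman_delete_cgr_safe(&my_cgr);
 *	qman_release_cgrid(my_cgr.cgrid);
 */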

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)

static int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x\n",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&p->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&p->p, FQRN);
				cpu_relax();
			} while (!found_fqrn);

		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}
		qm_dqrr_sdqcr_set(&p->p, 0);

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
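
/*
 * Usage sketch (hypothetical): the allocators hand out contiguous ID ranges
 * backed by genalloc; IDs are stored offset by DPAA_GENALLOC_OFF so that a
 * valid ID 0 is distinguishable from gen_pool_alloc() failure. A caller
 * wanting eight consecutive FQIDs might do:
 *
 *	u32 base_fqid;
 *	int i;
 *
 *	err = qman_alloc_fqid_range(&base_fqid, 8);
 *	if (err)
 *		return err;
 *	...
 *	for (i = 0; i < 8; i++)
 *		qman_release_fqid(base_fqid + i);
 *
 * qman_release_fqid() (below) shuts the FQ down first and deliberately
 * leaks the ID if that fails.
 */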

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an "invalid
	 * FQID" error, looking for non-OOS FQDs whose destination channel
	 * is the pool-channel being released.
	 * When a non-OOS FQD is found we attempt to clean it up.
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);