/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT	32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100

/* Portal register assists */

/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14

/* Cache-enabled register offsets */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940

/*
 * BTW, the drivers (and h/w programming model) already obtain the required
 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
 * or other order-preserving primitives would simply degrade performance. Hence
 * the use of the __raw_*() interfaces, which ensure that the compiler treats
 * the portal registers as volatile.
 */

/* Cache-enabled ring access */
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
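
/*
 * Worked example (illustrative only): ring entries are 64 bytes, so entry
 * 'idx' of a cache-enabled ring at 'base' lives at base + idx * 64, e.g.
 *
 *	qm_cl(dqrr->ring, 3) == (void *)dqrr->ring + 0xc0
 */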

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *     dmode == h/w dequeue mode.
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
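/*
 * Worked decode (illustrative): "qm_eqcr_pvb" is the EQCR production mode
 * in which h/w discovers new entries via the in-band valid-bit, and
 * "qm_dqrr_cce" is the DQRR consumption mode in which s/w acknowledges
 * consumption by writing the CI index to a cache-enabled register.
 */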
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define QM_EQCR_SIZE		8
#define QM_DQRR_SIZE		16
#define QM_MR_SIZE		8

/* "Enqueue Command" */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	u16 seqnum;
	u32 orp;	/* 24-bit */
	u32 fqid;	/* 24-bit */
	u32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};

/* MC (Management Command) command */
/* "FQ" command layout */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	u32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	__raw_writel(val, p->addr.ci + offset);
}

/* Cache-enabled portal access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ce + offset);
}

/* --- EQCR API --- */

#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}
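
/*
 * Worked example (illustrative only): EQCR entries are 64 bytes and
 * QM_EQCR_SIZE is 8, so EQCR_SHIFT == 6 and EQCR_CARRY == 0x200. The ring
 * base is 0x200-aligned, so incrementing the cursor past the last entry
 * sets only the "carry bit":
 *
 *	cursor == ring + 7;			(last entry)
 *	eqcr_carryclear(cursor + 1) == ring;	(carry cleared, wrapped)
 *
 * eqcr_inc() below relies on this: when the cleared pointer differs from
 * the raw increment, the ring wrapped and the valid-bit must toggle.
 */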

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |	/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
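
/*
 * Worked example (illustrative only): dpaa_cyc_diff() is the cyclic
 * distance from 'first' to 'last' around a power-of-2 ring. With
 * QM_EQCR_SIZE == 8, ci == 6 and pi == 1:
 *
 *	dpaa_cyc_diff(8, 6, 1) == 8 + 1 - 6 == 3	(entries in flight)
 *	available == 8 - 1 - 3 == 4			(free slots)
 *
 * One slot is always held back so that a completely full ring remains
 * distinguishable from an empty one.
 */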

static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
{
	return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
	DPAA_ASSERT(eqcr->available >= 1);
}

static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}

/* --- DQRR API --- */

#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
	       ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stashing.
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}
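
/*
 * Example (illustrative only): with the S bit set, DCAP_CI is a bitmask of
 * ring indices rather than a single index, so one register write can
 * acknowledge several non-contiguous entries, e.g.
 *
 *	qm_dqrr_cdc_consume_n(portal, BIT(0) | BIT(3));
 *
 * consumes DQRR entries 0 and 3 in a single DCA operation.
 */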

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
}

/* --- MR API --- */

#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}

/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}
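
/*
 * Canonical MC command sequence (illustrative sketch only; the real
 * callers below add their own locking and error handling):
 *
 *	union qm_mc_command *mcc;
 *	union qm_mc_result *mcr;
 *
 *	mcc = qm_mc_start(&p->p);
 *	mcc->fq.fqid = fqid;
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		return -ETIMEDOUT;
 *	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
 *	if (mcr->result != QM_MCR_RESULT_OK)
 *		return -EIO;
 */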

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	set_bits(mask, &fq->flags);
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	clear_bits(mask, &fq->flags);
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static struct workqueue_struct *qm_portal_wq;

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}

/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * from the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
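
/*
 * Worked example (illustrative only): the lookup table holds two slots per
 * FQID. For fqid == 5, qman_create_fq() registers a full-service object at
 * idx 10 (5 * 2) and an enqueue-only QMAN_FQ_FLAG_NO_MODIFY reference at
 * idx 11 (5 * 2 + 1), so fqid_to_fq(5) only ever finds the full-service
 * object. On 64-bit kernels the DQRR/MR "tag" (contextB) carries this idx;
 * on 32-bit kernels the object pointer itself is used as the 32-bit tag.
 */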

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;

	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here. Sleeping for 1ms (this
		 * path is only run at portal init, in process context) easily
		 * covers that budget without busy-waiting on jiffies.
		 */
		msleep(1);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with higher-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			 portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			 qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}

static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
}

static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(msg->fq.fqid);
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(msg->fq.contextB);
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(msg->ern.tag);
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	preempt_enable();
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}

/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}

/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe because:
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done so that we can't interfere.
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against the
 * above potential conflicts, but that this function itself is not re-entrant
 * (this is because the function tracks one end of each FIFO in the portal and
 * we do *not* want to lock that). So the consequence is that it is safe for
 * user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust contextB as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: contextB points to the FQ */
			fq = tag_to_fq(dq->contextB);
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry or advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}

void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);

void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits that
	 * are in p->irq_sources. As we're trimming that mask, if one of them
	 * were to assert in the status register just before we remove it from
	 * the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other bits
	 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	clear_bits(bits, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);

const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);

u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);

struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);

int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);

void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);

/* Frame queue API */

static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}

int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;

	/* A context_b of 0 is allegedly special, so don't use that fqid */
	if (fqid == 0 || fqid >= num_fqids) {
		WARN(1, "bad fqid %d\n", fqid);
		return -EINVAL;
	}

	fq->idx = fqid * 2;
	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
		fq->idx++;

	WARN_ON(fq_table[fq->idx]);
	fq_table[fq->idx] = fq;

	return 0;
}
EXPORT_SYMBOL(qman_create_fq);
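
/*
 * Example (illustrative sketch only; "my_dqrr_cb" is a hypothetical caller
 * callback, not part of this driver): a typical consumer creates an FQ with
 * a dynamically allocated FQID and schedules it on the local portal. The
 * qman_fq object must outlive the FQ, since it is registered in fq_table:
 *
 *	static struct qman_fq my_fq = {
 *		.cb.dqrr = my_dqrr_cb,
 *	};
 *	int err;
 *
 *	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
 *	if (!err)
 *		err = qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED |
 *				   QMAN_INITFQ_FLAG_LOCAL, NULL);
 */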
1695
1696void qman_destroy_fq(struct qman_fq *fq)
1697{
1698 /*
1699 * We don't need to lock the FQ as it is a pre-condition that the FQ be
1700 * quiesced. Instead, run some checks.
1701 */
1702 switch (fq->state) {
1703 case qman_fq_state_parked:
1704 case qman_fq_state_oos:
1705 if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1706 qman_release_fqid(fq->fqid);
1707
1708 DPAA_ASSERT(fq_table[fq->idx]);
1709 fq_table[fq->idx] = NULL;
1710 return;
1711 default:
1712 break;
1713 }
1714 DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1715}
1716EXPORT_SYMBOL(qman_destroy_fq);
1717
1718u32 qman_fq_fqid(struct qman_fq *fq)
1719{
1720 return fq->fqid;
1721}
1722EXPORT_SYMBOL(qman_fq_fqid);
1723
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
		/* OAC can't be set at the same time as TDTHRESH */
		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	mcc->fq.fqid = fq->fqid;
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
		mcc->initfq.fqd.context_b = fq_to_tag(fq);
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	if (opts) {
		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (opts->we_mask & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);

int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue an ALTERFQ_SCHED management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);

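/*
 * Return convention: 0 if the FQ retired immediately, 1 if retirement is
 * pending (QMAN_FQ_STATE_CHANGING is set and an FQRN message will follow),
 * negative errno otherwise.
 */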
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			msg.fq.fqid = fq->fqid;
			msg.fq.contextB = fq_to_tag(fq);
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);

int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

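/*
 * Returns -ERANGE when the h/w reports QM_MCR_RESULT_ERR_FQID; the cleanup
 * loops at the bottom of this file rely on that to detect the end of the
 * FQID range.
 */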
static int qman_query_fq_np(struct qman_fq *fq,
			    struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fq->fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK) {
		*cgrd = mcr->querycgr;
	} else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}

int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{
	struct qm_mcr_querycgr query_cgr;
	int err;

	err = qman_query_cgr(cgr, &query_cgr);
	if (err)
		return err;

	*result = !!query_cgr.cgr.cs;
	return 0;
}
EXPORT_SYMBOL(qman_query_cgr_congested);

/* internal function used as a wait_event() expression */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}

static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}

static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}

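/*
 * Illustrative use (a sketch; "my_fq" is an assumed caller-side object, not
 * a name from this driver):
 *
 *	ret = qman_volatile_dequeue(&my_fq,
 *				    QMAN_VOLATILE_FLAG_WAIT |
 *				    QMAN_VOLATILE_FLAG_FINISH,
 *				    QM_VDQCR_NUMFRAMES_SET(3));
 *
 * The FQID field of vdqcr must be left zero; it is filled in from fq->fqid.
 */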
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
					!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
					!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);

static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

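/*
 * Fill the next EQCR entry and commit it with a single PVB write. Note that
 * if no EQCR entry can be started (the ring is full), the frame is not
 * enqueued and 0 is still returned.
 */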
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to in
		 * order to try and liberate ring entries.
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time.
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq))
		goto out;

	eq->fqid = fq->fqid;
	eq->tag = fq_to_tag(fq);
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(qman_enqueue);

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

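/*
 * Register a CGR on this cpu's affine portal and hook up its congestion
 * state-change callback. Illustrative caller sketch (the names "my_cgr",
 * "my_cscn_cb" and "id" are assumptions, not from this driver):
 *
 *	my_cgr.cgrid = id;
 *	my_cgr.cb = my_cscn_cb;
 *	err = qman_create_cgr(&my_cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 *
 * The creating cpu is recorded in qman_cgr_cpus[] so that
 * qman_delete_cgr_safe() can later hop back to it.
 */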
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
			local_opts.cgr.cscn_targ_upd_ctrl =
				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
		else
			/* Overwrite TARG */
			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
						   TARG_MASK(p);
		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);

int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal\n");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}
	/* Overwrite TARG */
	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
	else
		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
					   ~(TARG_MASK(p));
	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

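/*
 * qman_delete_cgr() must run on the cpu whose portal the CGR was created on.
 * The "safe" variant below hops there via a bound kthread when invoked from
 * any other cpu, and waits for the deletion to complete.
 */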
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};

static int qman_delete_cgr_thread(void *p)
{
	struct cgr_comp *cgr_comp = p;
	int ret;

	ret = qman_delete_cgr(cgr_comp->cgr);
	complete(&cgr_comp->completion);

	return ret;
}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	struct task_struct *thread;
	struct cgr_comp cgr_comp;

	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		init_completion(&cgr_comp.completion);
		cgr_comp.cgr = cgr;
		thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
					"cgr_del");

		if (IS_ERR(thread))
			goto out;

		kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
		wake_up_process(thread);
		wait_for_completion(&cgr_comp.completion);
		preempt_enable();
		return;
	}
out:
	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (dqrr->fqid == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

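/*
 * Drain helpers built on the two consumers above, e.g. qm_mr_drain(p, FQRN)
 * expands to _qm_mr_consume_and_match_verb(p, QM_MR_VERB_FQRN).
 */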
#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)

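/*
 * Force an FQ to the OOS state, whatever state it is currently in: query the
 * non-programmable state; if the FQ is scheduled, parked or active, retire
 * it, drain any held frames via SDQCR/volatile dequeues, wait for the ORL to
 * empty, then OOS it; if it is already retired, just OOS it.
 */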
static int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, wq, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do any more checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	mcc->fq.fqid = fqid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	wq = qm_fqd_get_wq(&mcr->queryfq.fqd);

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&p->p);
		mcc->fq.fqid = fqid;
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained. In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;
			u16 dequeue_wq = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
				dequeue_wq = (channel -
					      qm_channel_pool1 + 1) << 4 | wq;
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
				dequeue_wq = wq;
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x\n",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&p->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&p->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&p->p, FQRN);
				cpu_relax();
			} while (!found_fqrn);
		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}
		qm_dqrr_sdqcr_set(&p->p, 0);

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		mcc->fq.fqid = fqid;
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		mcc->fq.fqid = fqid;
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

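/*
 * gen_pool_alloc() returns 0 on failure, which would make id 0
 * indistinguishable from an error; ids are therefore stored in the pools
 * offset by DPAA_GENALLOC_OFF, masked off on alloc and OR'd back on free.
 */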
static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from FQID 1 until we get an "invalid
	 * FQID" error, looking for non-OOS FQDs whose destination channel
	 * is the pool-channel being released. When a non-OOS FQD is found
	 * we attempt to clean it up.
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);