/* Broadcom FlexRM Mailbox Driver
 *
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Each Broadcom FlexSparx4 offload engine is implemented as an
 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
 * manager provides a set of rings which can be used to submit
 * work to a FlexSparx4 offload engine.
 *
 * This driver creates a mailbox controller using a set of FlexRM
 * rings, where each mailbox channel represents a separate FlexRM ring.
 */
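
/*
 * Illustrative sketch (not part of the driver): how a hypothetical mailbox
 * client could hand work to one FlexRM ring through the generic mailbox
 * framework. The client device and message contents below are assumptions
 * made up for this example; only mbox_request_channel(), mbox_send_message()
 * and struct brcm_message come from the mailbox and brcm-message APIs.
 *
 *	struct mbox_client cl = {
 *		.dev		= my_dev,	// hypothetical client device
 *		.tx_block	= false,
 *		.knows_txdone	= false,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	struct brcm_message msg = { .type = BRCM_MESSAGE_SPU };
 *
 *	// msg.spu.src and msg.spu.dst point to the client's scatterlists
 *	mbox_send_message(chan, &msg);	// result arrives via cl.rx_callback
 */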

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

40/* ====== FlexRM register defines ===== */
41
42/* FlexRM configuration */
43#define RING_REGS_SIZE 0x10000
44#define RING_DESC_SIZE 8
45#define RING_DESC_INDEX(offset) \
46 ((offset) / RING_DESC_SIZE)
47#define RING_DESC_OFFSET(index) \
48 ((index) * RING_DESC_SIZE)
49#define RING_MAX_REQ_COUNT 1024
50#define RING_BD_ALIGN_ORDER 12
51#define RING_BD_ALIGN_CHECK(addr) \
52 (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
53#define RING_BD_TOGGLE_INVALID(offset) \
54 (((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
55#define RING_BD_TOGGLE_VALID(offset) \
56 (!RING_BD_TOGGLE_INVALID(offset))
57#define RING_BD_DESC_PER_REQ 32
58#define RING_BD_DESC_COUNT \
59 (RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
60#define RING_BD_SIZE \
61 (RING_BD_DESC_COUNT * RING_DESC_SIZE)
62#define RING_CMPL_ALIGN_ORDER 13
63#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
64#define RING_CMPL_SIZE \
65 (RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
66#define RING_VER_MAGIC 0x76303031
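
/*
 * Worked example of the ring geometry implied by the defines above
 * (illustrative only): each ring reserves RING_MAX_REQ_COUNT (1024) requests
 * of up to RING_BD_DESC_PER_REQ (32) descriptors of RING_DESC_SIZE (8) bytes,
 * i.e. RING_BD_SIZE = 1024 * 32 * 8 = 256 KiB of BD memory, aligned to
 * 1 << RING_BD_ALIGN_ORDER = 4 KiB. The completion ring holds one descriptor
 * per request, i.e. RING_CMPL_SIZE = 1024 * 8 = 8 KiB, aligned to 8 KiB.
 * The TOGGLE bit expected by the hardware flips on every 4 KiB (512
 * descriptor) boundary of the BD area, which is what
 * RING_BD_TOGGLE_VALID()/RING_BD_TOGGLE_INVALID() compute from a BD offset.
 */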
67
68/* Per-Ring register offsets */
69#define RING_VER 0x000
70#define RING_BD_START_ADDR 0x004
71#define RING_BD_READ_PTR 0x008
72#define RING_BD_WRITE_PTR 0x00c
73#define RING_BD_READ_PTR_DDR_LS 0x010
74#define RING_BD_READ_PTR_DDR_MS 0x014
75#define RING_CMPL_START_ADDR 0x018
76#define RING_CMPL_WRITE_PTR 0x01c
77#define RING_NUM_REQ_RECV_LS 0x020
78#define RING_NUM_REQ_RECV_MS 0x024
79#define RING_NUM_REQ_TRANS_LS 0x028
80#define RING_NUM_REQ_TRANS_MS 0x02c
81#define RING_NUM_REQ_OUTSTAND 0x030
82#define RING_CONTROL 0x034
83#define RING_FLUSH_DONE 0x038
84#define RING_MSI_ADDR_LS 0x03c
85#define RING_MSI_ADDR_MS 0x040
86#define RING_MSI_CONTROL 0x048
87#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
88#define RING_MSI_DATA_VALUE 0x064
89
90/* Register RING_BD_START_ADDR fields */
91#define BD_LAST_UPDATE_HW_SHIFT 28
92#define BD_LAST_UPDATE_HW_MASK 0x1
93#define BD_START_ADDR_VALUE(pa) \
94 ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
95#define BD_START_ADDR_DECODE(val) \
96 ((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
97
98/* Register RING_CMPL_START_ADDR fields */
99#define CMPL_START_ADDR_VALUE(pa) \
	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

102/* Register RING_CONTROL fields */
103#define CONTROL_MASK_DISABLE_CONTROL 12
104#define CONTROL_FLUSH_SHIFT 5
105#define CONTROL_ACTIVE_SHIFT 4
106#define CONTROL_RATE_ADAPT_MASK 0xf
107#define CONTROL_RATE_DYNAMIC 0x0
108#define CONTROL_RATE_FAST 0x8
109#define CONTROL_RATE_MEDIUM 0x9
110#define CONTROL_RATE_SLOW 0xa
111#define CONTROL_RATE_IDLE 0xb
112
113/* Register RING_FLUSH_DONE fields */
114#define FLUSH_DONE_MASK 0x1
115
116/* Register RING_MSI_CONTROL fields */
117#define MSI_TIMER_VAL_SHIFT 16
118#define MSI_TIMER_VAL_MASK 0xffff
119#define MSI_ENABLE_SHIFT 15
120#define MSI_ENABLE_MASK 0x1
121#define MSI_COUNT_SHIFT 0
122#define MSI_COUNT_MASK 0x3ff
123
124/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
125#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
126#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
127#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
128#define BD_READ_PTR_DDR_ENABLE_MASK 0x1
129
130/* ====== FlexRM ring descriptor defines ===== */
131
132/* Completion descriptor format */
133#define CMPL_OPAQUE_SHIFT 0
134#define CMPL_OPAQUE_MASK 0xffff
135#define CMPL_ENGINE_STATUS_SHIFT 16
136#define CMPL_ENGINE_STATUS_MASK 0xffff
137#define CMPL_DME_STATUS_SHIFT 32
138#define CMPL_DME_STATUS_MASK 0xffff
139#define CMPL_RM_STATUS_SHIFT 48
140#define CMPL_RM_STATUS_MASK 0xffff
141
142/* Completion DME status code */
143#define DME_STATUS_MEM_COR_ERR BIT(0)
144#define DME_STATUS_MEM_UCOR_ERR BIT(1)
145#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
146#define DME_STATUS_FIFO_OVERFLOW BIT(3)
147#define DME_STATUS_RRESP_ERR BIT(4)
148#define DME_STATUS_BRESP_ERR BIT(5)
149#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
150 DME_STATUS_MEM_UCOR_ERR | \
151 DME_STATUS_FIFO_UNDERFLOW | \
152 DME_STATUS_FIFO_OVERFLOW | \
153 DME_STATUS_RRESP_ERR | \
154 DME_STATUS_BRESP_ERR)
155
156/* Completion RM status code */
157#define RM_STATUS_CODE_SHIFT 0
158#define RM_STATUS_CODE_MASK 0x3ff
159#define RM_STATUS_CODE_GOOD 0x0
160#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff
161
162/* General descriptor format */
163#define DESC_TYPE_SHIFT 60
164#define DESC_TYPE_MASK 0xf
165#define DESC_PAYLOAD_SHIFT 0
166#define DESC_PAYLOAD_MASK 0x0fffffffffffffff
167
168/* Null descriptor format */
169#define NULL_TYPE 0
170#define NULL_TOGGLE_SHIFT 58
171#define NULL_TOGGLE_MASK 0x1
172
173/* Header descriptor format */
174#define HEADER_TYPE 1
175#define HEADER_TOGGLE_SHIFT 58
176#define HEADER_TOGGLE_MASK 0x1
177#define HEADER_ENDPKT_SHIFT 57
178#define HEADER_ENDPKT_MASK 0x1
179#define HEADER_STARTPKT_SHIFT 56
180#define HEADER_STARTPKT_MASK 0x1
181#define HEADER_BDCOUNT_SHIFT 36
182#define HEADER_BDCOUNT_MASK 0x1f
183#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
184#define HEADER_FLAGS_SHIFT 16
185#define HEADER_FLAGS_MASK 0xffff
186#define HEADER_OPAQUE_SHIFT 0
187#define HEADER_OPAQUE_MASK 0xffff
188
189/* Source (SRC) descriptor format */
190#define SRC_TYPE 2
191#define SRC_LENGTH_SHIFT 44
192#define SRC_LENGTH_MASK 0xffff
193#define SRC_ADDR_SHIFT 0
194#define SRC_ADDR_MASK 0x00000fffffffffff
195
196/* Destination (DST) descriptor format */
197#define DST_TYPE 3
198#define DST_LENGTH_SHIFT 44
199#define DST_LENGTH_MASK 0xffff
200#define DST_ADDR_SHIFT 0
201#define DST_ADDR_MASK 0x00000fffffffffff
202
203/* Immediate (IMM) descriptor format */
204#define IMM_TYPE 4
205#define IMM_DATA_SHIFT 0
206#define IMM_DATA_MASK 0x0fffffffffffffff
207
208/* Next pointer (NPTR) descriptor format */
209#define NPTR_TYPE 5
210#define NPTR_TOGGLE_SHIFT 58
211#define NPTR_TOGGLE_MASK 0x1
212#define NPTR_ADDR_SHIFT 0
213#define NPTR_ADDR_MASK 0x00000fffffffffff
214
215/* Mega source (MSRC) descriptor format */
216#define MSRC_TYPE 6
217#define MSRC_LENGTH_SHIFT 44
218#define MSRC_LENGTH_MASK 0xffff
219#define MSRC_ADDR_SHIFT 0
220#define MSRC_ADDR_MASK 0x00000fffffffffff
221
222/* Mega destination (MDST) descriptor format */
223#define MDST_TYPE 7
224#define MDST_LENGTH_SHIFT 44
225#define MDST_LENGTH_MASK 0xffff
226#define MDST_ADDR_SHIFT 0
227#define MDST_ADDR_MASK 0x00000fffffffffff
228
229/* Source with tlast (SRCT) descriptor format */
230#define SRCT_TYPE 8
231#define SRCT_LENGTH_SHIFT 44
232#define SRCT_LENGTH_MASK 0xffff
233#define SRCT_ADDR_SHIFT 0
234#define SRCT_ADDR_MASK 0x00000fffffffffff
235
236/* Destination with tlast (DSTT) descriptor format */
237#define DSTT_TYPE 9
238#define DSTT_LENGTH_SHIFT 44
239#define DSTT_LENGTH_MASK 0xffff
240#define DSTT_ADDR_SHIFT 0
241#define DSTT_ADDR_MASK 0x00000fffffffffff
242
243/* Immediate with tlast (IMMT) descriptor format */
244#define IMMT_TYPE 10
245#define IMMT_DATA_SHIFT 0
246#define IMMT_DATA_MASK 0x0fffffffffffffff
247
248/* Descriptor helper macros */
249#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
250#define DESC_ENC(_d, _v, _s, _m) \
251 do { \
252 (_d) &= ~((u64)(_m) << (_s)); \
253 (_d) |= (((u64)(_v) & (_m)) << (_s)); \
254 } while (0)
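
/*
 * Illustrative sketch of the helper macros above (not part of the driver):
 * building and then decoding a completion-style value. 'cmpl' here is an
 * assumed raw 64-bit completion descriptor read from the completion ring.
 *
 *	u64 cmpl = 0;
 *
 *	DESC_ENC(cmpl, 0x12, CMPL_OPAQUE_SHIFT, CMPL_OPAQUE_MASK);
 *	DESC_ENC(cmpl, RM_STATUS_CODE_GOOD, CMPL_RM_STATUS_SHIFT,
 *		 CMPL_RM_STATUS_MASK);
 *
 *	// DESC_DEC() reverses the encoding:
 *	u32 reqid = DESC_DEC(cmpl, CMPL_OPAQUE_SHIFT, CMPL_OPAQUE_MASK);
 *	u32 rm = DESC_DEC(cmpl, CMPL_RM_STATUS_SHIFT, CMPL_RM_STATUS_MASK);
 */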
255
256/* ====== FlexRM data structures ===== */
257
258struct flexrm_ring {
259 /* Unprotected members */
260 int num;
261 struct flexrm_mbox *mbox;
262 void __iomem *regs;
263 bool irq_requested;
264 unsigned int irq;
	cpumask_t irq_aff_hint;
	unsigned int msi_timer_val;
267 unsigned int msi_count_threshold;
268 struct ida requests_ida;
269 struct brcm_message *requests[RING_MAX_REQ_COUNT];
270 void *bd_base;
271 dma_addr_t bd_dma_base;
272 u32 bd_write_offset;
273 void *cmpl_base;
274 dma_addr_t cmpl_dma_base;
	/* Atomic stats */
	atomic_t msg_send_count;
	atomic_t msg_cmpl_count;
	/* Protected members */
279 spinlock_t lock;
280 struct brcm_message *last_pending_msg;
281 u32 cmpl_read_offset;
282};
283
284struct flexrm_mbox {
285 struct device *dev;
286 void __iomem *regs;
287 u32 num_rings;
288 struct flexrm_ring *rings;
289 struct dma_pool *bd_pool;
290 struct dma_pool *cmpl_pool;
	struct dentry *root;
	struct dentry *config;
	struct dentry *stats;
	struct mbox_controller controller;
295};
296
297/* ====== FlexRM ring descriptor helper routines ===== */
298
299static u64 flexrm_read_desc(void *desc_ptr)
300{
301 return le64_to_cpu(*((u64 *)desc_ptr));
302}
303
304static void flexrm_write_desc(void *desc_ptr, u64 desc)
305{
306 *((u64 *)desc_ptr) = cpu_to_le64(desc);
307}
308
309static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
310{
311 return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
312}
313
314static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
315{
316 u32 status;
317
318 status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
319 CMPL_DME_STATUS_MASK);
320 if (status & DME_STATUS_ERROR_MASK)
321 return -EIO;
322
323 status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
324 CMPL_RM_STATUS_MASK);
325 status &= RM_STATUS_CODE_MASK;
326 if (status == RM_STATUS_CODE_AE_TIMEOUT)
327 return -ETIMEDOUT;
328
329 return 0;
330}
331
332static bool flexrm_is_next_table_desc(void *desc_ptr)
333{
334 u64 desc = flexrm_read_desc(desc_ptr);
335 u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
336
337 return (type == NPTR_TYPE) ? true : false;
338}
339
340static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
341{
342 u64 desc = 0;
343
344 DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
345 DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
346 DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
347
348 return desc;
349}
350
351static u64 flexrm_null_desc(u32 toggle)
352{
353 u64 desc = 0;
354
355 DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
356 DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
357
358 return desc;
359}
360
static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
	u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;

	if (nhcnt % HEADER_BDCOUNT_MAX)
		hcnt += 1;

	return hcnt;
}
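
/*
 * Worked example (illustrative only): flexrm_enqueue_desc() below emits a
 * HEADER at non-header positions 0, 31, 62, ... so a request with
 * nhcnt = 31 needs 1 header, nhcnt = 32 needs 2, and nhcnt = 70 needs 3
 * (headers covering 31 + 31 + 8 non-header descriptors); the helper above
 * returns exactly this ceiling of nhcnt / HEADER_BDCOUNT_MAX.
 */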
370
371static void flexrm_flip_header_toogle(void *desc_ptr)
372{
373 u64 desc = flexrm_read_desc(desc_ptr);
374
375 if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
376 desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
377 else
378 desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
379
380 flexrm_write_desc(desc_ptr, desc);
381}
382
383static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
384 u32 bdcount, u32 flags, u32 opaque)
385{
386 u64 desc = 0;
387
388 DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
389 DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
390 DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
391 DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
392 DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
393 DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
394 DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
395
396 return desc;
397}
398
399static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
400 u64 desc, void **desc_ptr, u32 *toggle,
401 void *start_desc, void *end_desc)
402{
403 u64 d;
404 u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
405
406 /* Sanity check */
407 if (nhcnt <= nhpos)
408 return;
409
	/*
	 * Each request or packet starts with a HEADER descriptor followed
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * following a HEADER descriptor is given by the BDCOUNT field of
	 * the HEADER descriptor. The max value of the BDCOUNT field is 31,
	 * which means we can only have 31 non-HEADER descriptors following
	 * one HEADER descriptor.
	 *
	 * In general use, the number of non-HEADER descriptors can easily
	 * go beyond 31. To tackle this situation, we have packet (or
	 * request) extension bits (STARTPKT and ENDPKT) in the HEADER
	 * descriptor.
	 *
	 * To use packet extension, the first HEADER descriptor of a request
	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
	 * TOGGLE bit of the first HEADER will be set to the invalid state
	 * to ensure that FlexRM does not start fetching descriptors until
	 * all descriptors are enqueued. The user of this function will flip
	 * the TOGGLE bit of the first HEADER after all descriptors are
	 * enqueued.
	 */
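
	/*
	 * Worked example (illustrative only): for a request with nhcnt = 70
	 * non-header descriptors, this function emits a HEADER at nhpos 0
	 * (STARTPKT=1, ENDPKT=0, BDCOUNT=31, TOGGLE initially invalid),
	 * a HEADER at nhpos 31 (STARTPKT=0, ENDPKT=0, BDCOUNT=31), and a
	 * HEADER at nhpos 62 (STARTPKT=0, ENDPKT=1, BDCOUNT=8), giving
	 * 73 descriptors in total before the trailing null descriptor.
	 */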
433
	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
				nhavail : HEADER_BDCOUNT_MAX;
446 d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
447 _bdcount, 0x0, reqid);
448
449 /* Write header descriptor */
450 flexrm_write_desc(*desc_ptr, d);
451
452 /* Point to next descriptor */
453 *desc_ptr += sizeof(desc);
454 if (*desc_ptr == end_desc)
455 *desc_ptr = start_desc;
456
457 /* Skip next pointer descriptors */
458 while (flexrm_is_next_table_desc(*desc_ptr)) {
459 *toggle = (*toggle) ? 0 : 1;
460 *desc_ptr += sizeof(desc);
461 if (*desc_ptr == end_desc)
462 *desc_ptr = start_desc;
463 }
464 }
465
466 /* Write desired descriptor */
467 flexrm_write_desc(*desc_ptr, desc);
468
469 /* Point to next descriptor */
470 *desc_ptr += sizeof(desc);
471 if (*desc_ptr == end_desc)
472 *desc_ptr = start_desc;
473
474 /* Skip next pointer descriptors */
475 while (flexrm_is_next_table_desc(*desc_ptr)) {
476 *toggle = (*toggle) ? 0 : 1;
477 *desc_ptr += sizeof(desc);
478 if (*desc_ptr == end_desc)
479 *desc_ptr = start_desc;
480 }
481}
482
483static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
484{
485 u64 desc = 0;
486
487 DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
488 DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
489 DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
490
491 return desc;
492}
493
494static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
495{
496 u64 desc = 0;
497
498 DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
499 DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
500 DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
501
502 return desc;
503}
504
505static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
506{
507 u64 desc = 0;
508
509 DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
510 DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
511 DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
512
513 return desc;
514}
515
516static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
517{
518 u64 desc = 0;
519
520 DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
521 DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
522 DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
523
524 return desc;
525}
526
527static u64 flexrm_imm_desc(u64 data)
528{
529 u64 desc = 0;
530
531 DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
532 DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
533
534 return desc;
535}
536
537static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
538{
539 u64 desc = 0;
540
541 DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
542 DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
543 DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
544
545 return desc;
546}
547
548static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
549{
550 u64 desc = 0;
551
552 DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
553 DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
554 DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
555
556 return desc;
557}
558
559static u64 flexrm_immt_desc(u64 data)
560{
561 u64 desc = 0;
562
563 DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
564 DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
565
566 return desc;
567}
568
569static bool flexrm_spu_sanity_check(struct brcm_message *msg)
570{
571 struct scatterlist *sg;
572
573 if (!msg->spu.src || !msg->spu.dst)
574 return false;
575 for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
576 if (sg->length & 0xf) {
577 if (sg->length > SRC_LENGTH_MASK)
578 return false;
579 } else {
580 if (sg->length > (MSRC_LENGTH_MASK * 16))
581 return false;
582 }
583 }
584 for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
585 if (sg->length & 0xf) {
586 if (sg->length > DST_LENGTH_MASK)
587 return false;
588 } else {
589 if (sg->length > (MDST_LENGTH_MASK * 16))
590 return false;
591 }
592 }
593
594 return true;
595}
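
/*
 * Illustrative note (not part of the driver): the check above mirrors how
 * flexrm_spu_write_descs() later picks descriptor types. A scatterlist
 * segment whose length is not a multiple of 16 is described with SRC/DST
 * and may be at most SRC_LENGTH_MASK (0xffff) bytes, while a 16-byte
 * multiple is described with MSRC/MDST in units of 16 bytes and may be up
 * to MSRC_LENGTH_MASK * 16 (roughly 1 MiB) bytes long.
 */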
596
597static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
598{
599 u32 cnt = 0;
600 unsigned int dst_target = 0;
601 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
602
603 while (src_sg || dst_sg) {
604 if (src_sg) {
605 cnt++;
606 dst_target = src_sg->length;
607 src_sg = sg_next(src_sg);
608 } else
609 dst_target = UINT_MAX;
610
611 while (dst_target && dst_sg) {
612 cnt++;
613 if (dst_sg->length < dst_target)
614 dst_target -= dst_sg->length;
615 else
616 dst_target = 0;
617 dst_sg = sg_next(dst_sg);
618 }
619 }
620
621 return cnt;
622}
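
/*
 * Worked example (illustrative only): one 64-byte source segment paired
 * with two 32-byte destination segments yields cnt = 3 above (one SRC/MSRC
 * descriptor plus two DST/MDST descriptors), since destination segments are
 * consumed until they cover the current source segment's length.
 */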
623
624static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
625{
626 int rc;
627
628 rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
629 DMA_TO_DEVICE);
630 if (rc < 0)
631 return rc;
632
633 rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
634 DMA_FROM_DEVICE);
635 if (rc < 0) {
636 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
637 DMA_TO_DEVICE);
638 return rc;
639 }
640
641 return 0;
642}
643
644static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
645{
646 dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
647 DMA_FROM_DEVICE);
648 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
649 DMA_TO_DEVICE);
650}
651
652static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
653 u32 reqid, void *desc_ptr, u32 toggle,
654 void *start_desc, void *end_desc)
655{
656 u64 d;
657 u32 nhpos = 0;
658 void *orig_desc_ptr = desc_ptr;
659 unsigned int dst_target = 0;
660 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
661
662 while (src_sg || dst_sg) {
663 if (src_sg) {
664 if (sg_dma_len(src_sg) & 0xf)
665 d = flexrm_src_desc(sg_dma_address(src_sg),
666 sg_dma_len(src_sg));
667 else
668 d = flexrm_msrc_desc(sg_dma_address(src_sg),
669 sg_dma_len(src_sg)/16);
670 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
671 d, &desc_ptr, &toggle,
672 start_desc, end_desc);
673 nhpos++;
674 dst_target = sg_dma_len(src_sg);
675 src_sg = sg_next(src_sg);
676 } else
677 dst_target = UINT_MAX;
678
679 while (dst_target && dst_sg) {
680 if (sg_dma_len(dst_sg) & 0xf)
681 d = flexrm_dst_desc(sg_dma_address(dst_sg),
682 sg_dma_len(dst_sg));
683 else
684 d = flexrm_mdst_desc(sg_dma_address(dst_sg),
685 sg_dma_len(dst_sg)/16);
686 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
687 d, &desc_ptr, &toggle,
688 start_desc, end_desc);
689 nhpos++;
690 if (sg_dma_len(dst_sg) < dst_target)
691 dst_target -= sg_dma_len(dst_sg);
692 else
693 dst_target = 0;
694 dst_sg = sg_next(dst_sg);
695 }
696 }
697
698 /* Null descriptor with invalid toggle bit */
699 flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
700
701 /* Ensure that descriptors have been written to memory */
702 wmb();
703
704 /* Flip toggle bit in header */
705 flexrm_flip_header_toogle(orig_desc_ptr);
706
707 return desc_ptr;
708}
709
710static bool flexrm_sba_sanity_check(struct brcm_message *msg)
711{
712 u32 i;
713
714 if (!msg->sba.cmds || !msg->sba.cmds_count)
715 return false;
716
717 for (i = 0; i < msg->sba.cmds_count; i++) {
718 if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
719 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
720 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
721 return false;
722 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
723 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
724 return false;
725 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
726 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
727 return false;
728 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
729 (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
730 return false;
731 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
732 (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
733 return false;
734 }
735
736 return true;
737}
738
739static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
740{
741 u32 i, cnt;
742
743 cnt = 0;
744 for (i = 0; i < msg->sba.cmds_count; i++) {
745 cnt++;
746
747 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
748 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
749 cnt++;
750
751 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
752 cnt++;
753
754 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
755 cnt++;
756 }
757
758 return cnt;
759}
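
/*
 * Worked example (illustrative only): a command with neither
 * BRCM_SBA_CMD_TYPE_B nor BRCM_SBA_CMD_TYPE_C set but with both
 * BRCM_SBA_CMD_HAS_RESP and BRCM_SBA_CMD_HAS_OUTPUT set counts as
 * 3 descriptors above (the command itself plus one response and one
 * output descriptor); a TYPE_B or TYPE_C command adds one more for its
 * source-with-tlast data descriptor.
 */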
760
761static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
762 u32 reqid, void *desc_ptr, u32 toggle,
763 void *start_desc, void *end_desc)
764{
765 u64 d;
766 u32 i, nhpos = 0;
767 struct brcm_sba_command *c;
768 void *orig_desc_ptr = desc_ptr;
769
770 /* Convert SBA commands into descriptors */
771 for (i = 0; i < msg->sba.cmds_count; i++) {
772 c = &msg->sba.cmds[i];
773
774 if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
775 (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
776 /* Destination response descriptor */
777 d = flexrm_dst_desc(c->resp, c->resp_len);
778 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
779 d, &desc_ptr, &toggle,
780 start_desc, end_desc);
781 nhpos++;
782 } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
783 /* Destination response with tlast descriptor */
784 d = flexrm_dstt_desc(c->resp, c->resp_len);
785 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
786 d, &desc_ptr, &toggle,
787 start_desc, end_desc);
788 nhpos++;
789 }
790
791 if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
792 /* Destination with tlast descriptor */
793 d = flexrm_dstt_desc(c->data, c->data_len);
794 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
795 d, &desc_ptr, &toggle,
796 start_desc, end_desc);
797 nhpos++;
798 }
799
800 if (c->flags & BRCM_SBA_CMD_TYPE_B) {
801 /* Command as immediate descriptor */
802 d = flexrm_imm_desc(c->cmd);
803 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
804 d, &desc_ptr, &toggle,
805 start_desc, end_desc);
806 nhpos++;
807 } else {
808 /* Command as immediate descriptor with tlast */
809 d = flexrm_immt_desc(c->cmd);
810 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
811 d, &desc_ptr, &toggle,
812 start_desc, end_desc);
813 nhpos++;
814 }
815
816 if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
817 (c->flags & BRCM_SBA_CMD_TYPE_C)) {
818 /* Source with tlast descriptor */
819 d = flexrm_srct_desc(c->data, c->data_len);
820 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
821 d, &desc_ptr, &toggle,
822 start_desc, end_desc);
823 nhpos++;
824 }
825 }
826
827 /* Null descriptor with invalid toggle bit */
828 flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
829
830 /* Ensure that descriptors have been written to memory */
831 wmb();
832
833 /* Flip toggle bit in header */
834 flexrm_flip_header_toogle(orig_desc_ptr);
835
836 return desc_ptr;
837}
838
839static bool flexrm_sanity_check(struct brcm_message *msg)
840{
841 if (!msg)
842 return false;
843
844 switch (msg->type) {
845 case BRCM_MESSAGE_SPU:
846 return flexrm_spu_sanity_check(msg);
847 case BRCM_MESSAGE_SBA:
848 return flexrm_sba_sanity_check(msg);
849 default:
850 return false;
851 };
852}
853
854static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
855{
856 if (!msg)
857 return 0;
858
859 switch (msg->type) {
860 case BRCM_MESSAGE_SPU:
861 return flexrm_spu_estimate_nonheader_desc_count(msg);
862 case BRCM_MESSAGE_SBA:
863 return flexrm_sba_estimate_nonheader_desc_count(msg);
864 default:
865 return 0;
866 };
867}
868
869static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
870{
871 if (!dev || !msg)
872 return -EINVAL;
873
874 switch (msg->type) {
875 case BRCM_MESSAGE_SPU:
876 return flexrm_spu_dma_map(dev, msg);
877 default:
878 break;
879 }
880
881 return 0;
882}
883
884static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
885{
886 if (!dev || !msg)
887 return;
888
889 switch (msg->type) {
890 case BRCM_MESSAGE_SPU:
891 flexrm_spu_dma_unmap(dev, msg);
892 break;
893 default:
894 break;
895 }
896}
897
898static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
899 u32 reqid, void *desc_ptr, u32 toggle,
900 void *start_desc, void *end_desc)
901{
902 if (!msg || !desc_ptr || !start_desc || !end_desc)
903 return ERR_PTR(-ENOTSUPP);
904
905 if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
906 return ERR_PTR(-ERANGE);
907
908 switch (msg->type) {
909 case BRCM_MESSAGE_SPU:
910 return flexrm_spu_write_descs(msg, nhcnt, reqid,
911 desc_ptr, toggle,
912 start_desc, end_desc);
913 case BRCM_MESSAGE_SBA:
914 return flexrm_sba_write_descs(msg, nhcnt, reqid,
915 desc_ptr, toggle,
916 start_desc, end_desc);
917 default:
918 return ERR_PTR(-ENOTSUPP);
919 };
920}
921
922/* ====== FlexRM driver helper routines ===== */
923
static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
925 struct seq_file *file)
926{
927 int i;
928 const char *state;
929 struct flexrm_ring *ring;
930
931 seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
932 "Ring#", "State", "BD_Addr", "BD_Size",
933 "Cmpl_Addr", "Cmpl_Size");
934
935 for (i = 0; i < mbox->num_rings; i++) {
936 ring = &mbox->rings[i];
937 if (readl(ring->regs + RING_CONTROL) &
938 BIT(CONTROL_ACTIVE_SHIFT))
939 state = "active";
940 else
941 state = "inactive";
942 seq_printf(file,
943 "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
944 ring->num, state,
945 (unsigned long long)ring->bd_dma_base,
946 (u32)RING_BD_SIZE,
947 (unsigned long long)ring->cmpl_dma_base,
948 (u32)RING_CMPL_SIZE);
949 }
950}
951
952static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
953 struct seq_file *file)
954{
955 int i;
956 u32 val, bd_read_offset;
957 struct flexrm_ring *ring;
958
959 seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
960 "Ring#", "BD_Read", "BD_Write",
961 "Cmpl_Read", "Submitted", "Completed");
962
963 for (i = 0; i < mbox->num_rings; i++) {
964 ring = &mbox->rings[i];
965 bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
966 val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
967 bd_read_offset *= RING_DESC_SIZE;
968 bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
969 ring->bd_dma_base);
970 seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
971 ring->num,
972 (u32)bd_read_offset,
973 (u32)ring->bd_write_offset,
974 (u32)ring->cmpl_read_offset,
975 (u32)atomic_read(&ring->msg_send_count),
976 (u32)atomic_read(&ring->msg_cmpl_count));
977 }
978}
979
static int flexrm_new_request(struct flexrm_ring *ring,
981 struct brcm_message *batch_msg,
982 struct brcm_message *msg)
983{
984 void *next;
985 unsigned long flags;
986 u32 val, count, nhcnt;
987 u32 read_offset, write_offset;
988 bool exit_cleanup = false;
989 int ret = 0, reqid;
990
991 /* Do sanity check on message */
992 if (!flexrm_sanity_check(msg))
993 return -EIO;
994 msg->error = 0;
995
996 /* If no requests possible then save data pointer and goto done. */
997 reqid = ida_simple_get(&ring->requests_ida, 0,
998 RING_MAX_REQ_COUNT, GFP_KERNEL);
999 if (reqid < 0) {
1000 spin_lock_irqsave(&ring->lock, flags);
1001 if (batch_msg)
1002 ring->last_pending_msg = batch_msg;
1003 else
1004 ring->last_pending_msg = msg;
1005 spin_unlock_irqrestore(&ring->lock, flags);
1006 return 0;
1007 }
1008 ring->requests[reqid] = msg;
1009
1010 /* Do DMA mappings for the message */
1011 ret = flexrm_dma_map(ring->mbox->dev, msg);
1012 if (ret < 0) {
1013 ring->requests[reqid] = NULL;
1014 ida_simple_remove(&ring->requests_ida, reqid);
1015 return ret;
1016 }
1017
1018 /* If last_pending_msg is already set then goto done with error */
1019 spin_lock_irqsave(&ring->lock, flags);
1020 if (ring->last_pending_msg)
1021 ret = -ENOSPC;
1022 spin_unlock_irqrestore(&ring->lock, flags);
1023 if (ret < 0) {
1024 dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num);
1025 exit_cleanup = true;
1026 goto exit;
1027 }
1028
1029 /* Determine current HW BD read offset */
1030 read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
1031 val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
1032 read_offset *= RING_DESC_SIZE;
1033 read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
1034
	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				     number of header descriptors +
	 *				     one null descriptor
	 */
1040 nhcnt = flexrm_estimate_nonheader_desc_count(msg);
1041 count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
1042
1043 /* Check for available descriptor space. */
1044 write_offset = ring->bd_write_offset;
1045 while (count) {
1046 if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
1047 count--;
1048 write_offset += RING_DESC_SIZE;
1049 if (write_offset == RING_BD_SIZE)
1050 write_offset = 0x0;
1051 if (write_offset == read_offset)
1052 break;
1053 }
1054 if (count) {
1055 spin_lock_irqsave(&ring->lock, flags);
1056 if (batch_msg)
1057 ring->last_pending_msg = batch_msg;
1058 else
1059 ring->last_pending_msg = msg;
1060 spin_unlock_irqrestore(&ring->lock, flags);
1061 ret = 0;
1062 exit_cleanup = true;
1063 goto exit;
1064 }
1065
1066 /* Write descriptors to ring */
1067 next = flexrm_write_descs(msg, nhcnt, reqid,
1068 ring->bd_base + ring->bd_write_offset,
1069 RING_BD_TOGGLE_VALID(ring->bd_write_offset),
1070 ring->bd_base, ring->bd_base + RING_BD_SIZE);
1071 if (IS_ERR(next)) {
1072 ret = PTR_ERR(next);
1073 exit_cleanup = true;
1074 goto exit;
1075 }
1076
1077 /* Save ring BD write offset */
1078 ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
1079
	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
1084 /* Update error status in message */
1085 msg->error = ret;
1086
1087 /* Cleanup if we failed */
1088 if (exit_cleanup) {
1089 flexrm_dma_unmap(ring->mbox->dev, msg);
1090 ring->requests[reqid] = NULL;
1091 ida_simple_remove(&ring->requests_ida, reqid);
1092 }
1093
1094 return ret;
1095}
1096
1097static int flexrm_process_completions(struct flexrm_ring *ring)
1098{
1099 u64 desc;
1100 int err, count = 0;
1101 unsigned long flags;
1102 struct brcm_message *msg = NULL;
1103 u32 reqid, cmpl_read_offset, cmpl_write_offset;
1104 struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
1105
1106 spin_lock_irqsave(&ring->lock, flags);
1107
1108 /* Check last_pending_msg */
1109 if (ring->last_pending_msg) {
1110 msg = ring->last_pending_msg;
1111 ring->last_pending_msg = NULL;
1112 }
1113
	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read the completion write pointer at least once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
1122 cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1123 cmpl_write_offset *= RING_DESC_SIZE;
1124 cmpl_read_offset = ring->cmpl_read_offset;
1125 ring->cmpl_read_offset = cmpl_write_offset;
1126
1127 spin_unlock_irqrestore(&ring->lock, flags);
1128
1129 /* If last_pending_msg was set then queue it back */
1130 if (msg)
1131 mbox_send_message(chan, msg);
1132
1133 /* For each completed request notify mailbox clients */
1134 reqid = 0;
1135 while (cmpl_read_offset != cmpl_write_offset) {
1136 /* Dequeue next completion descriptor */
1137 desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
1138
1139 /* Next read offset */
1140 cmpl_read_offset += RING_DESC_SIZE;
1141 if (cmpl_read_offset == RING_CMPL_SIZE)
1142 cmpl_read_offset = 0;
1143
1144 /* Decode error from completion descriptor */
1145 err = flexrm_cmpl_desc_to_error(desc);
1146 if (err < 0) {
1147 dev_warn(ring->mbox->dev,
1148 "got completion desc=0x%lx with error %d",
1149 (unsigned long)desc, err);
1150 }
1151
1152 /* Determine request id from completion descriptor */
1153 reqid = flexrm_cmpl_desc_to_reqid(desc);
1154
1155 /* Determine message pointer based on reqid */
1156 msg = ring->requests[reqid];
1157 if (!msg) {
1158 dev_warn(ring->mbox->dev,
1159 "null msg pointer for completion desc=0x%lx",
1160 (unsigned long)desc);
1161 continue;
1162 }
1163
1164 /* Release reqid for recycling */
1165 ring->requests[reqid] = NULL;
1166 ida_simple_remove(&ring->requests_ida, reqid);
1167
1168 /* Unmap DMA mappings */
1169 flexrm_dma_unmap(ring->mbox->dev, msg);
1170
1171 /* Give-back message to mailbox client */
1172 msg->error = err;
1173 mbox_chan_received_data(chan, msg);
1174
1175 /* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
1178 }
1179
1180 return count;
1181}
1182
/* ====== FlexRM Debugfs callbacks ====== */
1184
1185static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
1186{
1187 struct platform_device *pdev = to_platform_device(file->private);
1188 struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
1189
1190 /* Write config in file */
1191 flexrm_write_config_in_seqfile(mbox, file);
1192
1193 return 0;
1194}
1195
1196static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
1197{
1198 struct platform_device *pdev = to_platform_device(file->private);
1199 struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
1200
1201 /* Write stats in file */
1202 flexrm_write_stats_in_seqfile(mbox, file);
1203
1204 return 0;
1205}
1206
/* ====== FlexRM interrupt handler ===== */
1208
1209static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
1210{
	/* We only have MSI for completions, so just wake up the IRQ thread. */
	/* Ring-related errors are reported via completion descriptors. */
1213
1214 return IRQ_WAKE_THREAD;
1215}
1216
1217static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
1218{
1219 flexrm_process_completions(dev_id);
1220
1221 return IRQ_HANDLED;
1222}
1223
1224/* ====== FlexRM mailbox callbacks ===== */
1225
1226static int flexrm_send_data(struct mbox_chan *chan, void *data)
1227{
1228 int i, rc;
1229 struct flexrm_ring *ring = chan->con_priv;
1230 struct brcm_message *msg = data;
1231
1232 if (msg->type == BRCM_MESSAGE_BATCH) {
1233 for (i = msg->batch.msgs_queued;
1234 i < msg->batch.msgs_count; i++) {
1235 rc = flexrm_new_request(ring, msg,
1236 &msg->batch.msgs[i]);
1237 if (rc) {
1238 msg->error = rc;
1239 return rc;
1240 }
1241 msg->batch.msgs_queued++;
1242 }
1243 return 0;
1244 }
1245
1246 return flexrm_new_request(ring, NULL, data);
1247}
1248
1249static bool flexrm_peek_data(struct mbox_chan *chan)
1250{
1251 int cnt = flexrm_process_completions(chan->con_priv);
1252
1253 return (cnt > 0) ? true : false;
1254}
1255
1256static int flexrm_startup(struct mbox_chan *chan)
1257{
1258 u64 d;
1259 u32 val, off;
1260 int ret = 0;
1261 dma_addr_t next_addr;
1262 struct flexrm_ring *ring = chan->con_priv;
1263
1264 /* Allocate BD memory */
1265 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
1266 GFP_KERNEL, &ring->bd_dma_base);
1267 if (!ring->bd_base) {
1268 dev_err(ring->mbox->dev, "can't allocate BD memory\n");
1269 ret = -ENOMEM;
1270 goto fail;
1271 }
1272
1273 /* Configure next table pointer entries in BD memory */
1274 for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
1275 next_addr = off + RING_DESC_SIZE;
1276 if (next_addr == RING_BD_SIZE)
1277 next_addr = 0;
1278 next_addr += ring->bd_dma_base;
1279 if (RING_BD_ALIGN_CHECK(next_addr))
1280 d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
1281 next_addr);
1282 else
1283 d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
1284 flexrm_write_desc(ring->bd_base + off, d);
1285 }
1286
1287 /* Allocate completion memory */
1288 ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
1289 GFP_KERNEL, &ring->cmpl_dma_base);
1290 if (!ring->cmpl_base) {
1291 dev_err(ring->mbox->dev, "can't allocate completion memory\n");
1292 ret = -ENOMEM;
1293 goto fail_free_bd_memory;
1294 }
1295 memset(ring->cmpl_base, 0, RING_CMPL_SIZE);
1296
1297 /* Request IRQ */
1298 if (ring->irq == UINT_MAX) {
1299 dev_err(ring->mbox->dev, "ring IRQ not available\n");
1300 ret = -ENODEV;
1301 goto fail_free_cmpl_memory;
1302 }
1303 ret = request_threaded_irq(ring->irq,
1304 flexrm_irq_event,
1305 flexrm_irq_thread,
1306 0, dev_name(ring->mbox->dev), ring);
1307 if (ret) {
1308 dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
1309 goto fail_free_cmpl_memory;
1310 }
1311 ring->irq_requested = true;
1312
	/* Set IRQ affinity hint */
1314 ring->irq_aff_hint = CPU_MASK_NONE;
1315 val = ring->mbox->num_rings;
1316 val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
1317 cpumask_set_cpu((ring->num / val) % num_online_cpus(),
1318 &ring->irq_aff_hint);
1319 ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
1320 if (ret) {
1321 dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
1322 goto fail_free_irq;
1323 }
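
	/*
	 * Worked example (illustrative only): with 32 rings spread over
	 * 8 online CPUs the code above computes val = 4, so rings 0-3 hint
	 * at CPU0, rings 4-7 at CPU1, and so on, i.e. consecutive rings are
	 * grouped evenly across the online CPUs.
	 */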
1324
	/* Disable/inactivate ring */
1326 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1327
1328 /* Program BD start address */
1329 val = BD_START_ADDR_VALUE(ring->bd_dma_base);
1330 writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
1331
1332 /* BD write pointer will be same as HW write pointer */
1333 ring->bd_write_offset =
1334 readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
1335 ring->bd_write_offset *= RING_DESC_SIZE;
1336
1337 /* Program completion start address */
1338 val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
1339 writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
1340
1341 /* Ensure last pending message is cleared */
1342 ring->last_pending_msg = NULL;
1343
1344 /* Completion read pointer will be same as HW write pointer */
1345 ring->cmpl_read_offset =
1346 readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1347 ring->cmpl_read_offset *= RING_DESC_SIZE;
1348
1349 /* Read ring Tx, Rx, and Outstanding counts to clear */
1350 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
1351 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
1352 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
1353 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
1354 readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
1355
1356 /* Configure RING_MSI_CONTROL */
1357 val = 0;
1358 val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
1359 val |= BIT(MSI_ENABLE_SHIFT);
1360 val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
1361 writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
1362
1363 /* Enable/activate ring */
1364 val = BIT(CONTROL_ACTIVE_SHIFT);
1365 writel_relaxed(val, ring->regs + RING_CONTROL);
1366
	/* Reset stats to zero */
1368 atomic_set(&ring->msg_send_count, 0);
1369 atomic_set(&ring->msg_cmpl_count, 0);
1370
	return 0;

fail_free_irq:
	free_irq(ring->irq, ring);
	ring->irq_requested = false;
fail_free_cmpl_memory:
1377 dma_pool_free(ring->mbox->cmpl_pool,
1378 ring->cmpl_base, ring->cmpl_dma_base);
1379 ring->cmpl_base = NULL;
1380fail_free_bd_memory:
1381 dma_pool_free(ring->mbox->bd_pool,
1382 ring->bd_base, ring->bd_dma_base);
1383 ring->bd_base = NULL;
1384fail:
1385 return ret;
1386}
1387
1388static void flexrm_shutdown(struct mbox_chan *chan)
1389{
1390 u32 reqid;
1391 unsigned int timeout;
1392 struct brcm_message *msg;
1393 struct flexrm_ring *ring = chan->con_priv;
1394
1395 /* Disable/inactivate ring */
1396 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1397
1398 /* Flush ring with timeout of 1s */
1399 timeout = 1000;
1400 writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
1401 ring->regs + RING_CONTROL);
1402 do {
1403 if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1404 FLUSH_DONE_MASK)
1405 break;
1406 mdelay(1);
1407 } while (timeout--);
1408
1409 /* Abort all in-flight requests */
1410 for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
1411 msg = ring->requests[reqid];
1412 if (!msg)
1413 continue;
1414
1415 /* Release reqid for recycling */
1416 ring->requests[reqid] = NULL;
1417 ida_simple_remove(&ring->requests_ida, reqid);
1418
1419 /* Unmap DMA mappings */
1420 flexrm_dma_unmap(ring->mbox->dev, msg);
1421
1422 /* Give-back message to mailbox client */
1423 msg->error = -EIO;
1424 mbox_chan_received_data(chan, msg);
1425 }
1426
1427 /* Release IRQ */
1428 if (ring->irq_requested) {
		irq_set_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
1431 ring->irq_requested = false;
1432 }
1433
1434 /* Free-up completion descriptor ring */
1435 if (ring->cmpl_base) {
1436 dma_pool_free(ring->mbox->cmpl_pool,
1437 ring->cmpl_base, ring->cmpl_dma_base);
1438 ring->cmpl_base = NULL;
1439 }
1440
1441 /* Free-up BD descriptor ring */
1442 if (ring->bd_base) {
1443 dma_pool_free(ring->mbox->bd_pool,
1444 ring->bd_base, ring->bd_dma_base);
1445 ring->bd_base = NULL;
1446 }
1447}
1448
1449static bool flexrm_last_tx_done(struct mbox_chan *chan)
1450{
1451 bool ret;
1452 unsigned long flags;
1453 struct flexrm_ring *ring = chan->con_priv;
1454
1455 spin_lock_irqsave(&ring->lock, flags);
1456 ret = (ring->last_pending_msg) ? false : true;
1457 spin_unlock_irqrestore(&ring->lock, flags);
1458
1459 return ret;
1460}
1461
1462static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
1463 .send_data = flexrm_send_data,
1464 .startup = flexrm_startup,
1465 .shutdown = flexrm_shutdown,
1466 .last_tx_done = flexrm_last_tx_done,
1467 .peek_data = flexrm_peek_data,
1468};
1469
1470static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
1471 const struct of_phandle_args *pa)
1472{
1473 struct mbox_chan *chan;
1474 struct flexrm_ring *ring;
1475
1476 if (pa->args_count < 3)
1477 return ERR_PTR(-EINVAL);
1478
1479 if (pa->args[0] >= cntlr->num_chans)
1480 return ERR_PTR(-ENOENT);
1481
1482 if (pa->args[1] > MSI_COUNT_MASK)
1483 return ERR_PTR(-EINVAL);
1484
1485 if (pa->args[2] > MSI_TIMER_VAL_MASK)
1486 return ERR_PTR(-EINVAL);
1487
1488 chan = &cntlr->chans[pa->args[0]];
1489 ring = chan->con_priv;
1490 ring->msi_count_threshold = pa->args[1];
1491 ring->msi_timer_val = pa->args[2];
1492
1493 return chan;
1494}
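
/*
 * Illustrative sketch (not part of the driver): a client node referencing
 * this controller is expected to pass three cells, matching the of_xlate
 * above, e.g. "mboxes = <&ring_mgr 0 0x1 0xffff>;" for ring 0 with an MSI
 * count threshold of 1 and the maximum MSI timer value. The "&ring_mgr"
 * label is an assumption made up for this example.
 */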
1495
1496/* ====== FlexRM platform driver ===== */
1497
1498static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
1499{
1500 struct device *dev = msi_desc_to_dev(desc);
1501 struct flexrm_mbox *mbox = dev_get_drvdata(dev);
1502 struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
1503
1504 /* Configure per-Ring MSI registers */
1505 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
1506 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
1507 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
1508}
1509
1510static int flexrm_mbox_probe(struct platform_device *pdev)
1511{
1512 int index, ret = 0;
1513 void __iomem *regs;
1514 void __iomem *regs_end;
1515 struct msi_desc *desc;
1516 struct resource *iomem;
1517 struct flexrm_ring *ring;
1518 struct flexrm_mbox *mbox;
1519 struct device *dev = &pdev->dev;
1520
1521 /* Allocate driver mailbox struct */
1522 mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
1523 if (!mbox) {
1524 ret = -ENOMEM;
1525 goto fail;
1526 }
1527 mbox->dev = dev;
1528 platform_set_drvdata(pdev, mbox);
1529
1530 /* Get resource for registers */
1531 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1532 if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
1533 ret = -ENODEV;
1534 goto fail;
1535 }
1536
1537 /* Map registers of all rings */
1538 mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
1539 if (IS_ERR(mbox->regs)) {
1540 ret = PTR_ERR(mbox->regs);
1541 dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
1542 goto fail;
1543 }
1544 regs_end = mbox->regs + resource_size(iomem);
1545
1546 /* Scan and count available rings */
1547 mbox->num_rings = 0;
1548 for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
1549 if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
1550 mbox->num_rings++;
1551 }
1552 if (!mbox->num_rings) {
1553 ret = -ENODEV;
1554 goto fail;
1555 }
1556
1557 /* Allocate driver ring structs */
1558 ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
1559 if (!ring) {
1560 ret = -ENOMEM;
1561 goto fail;
1562 }
1563 mbox->rings = ring;
1564
1565 /* Initialize members of driver ring structs */
1566 regs = mbox->regs;
1567 for (index = 0; index < mbox->num_rings; index++) {
1568 ring = &mbox->rings[index];
1569 ring->num = index;
1570 ring->mbox = mbox;
1571 while ((regs < regs_end) &&
1572 (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
1573 regs += RING_REGS_SIZE;
1574 if (regs_end <= regs) {
1575 ret = -ENODEV;
1576 goto fail;
1577 }
1578 ring->regs = regs;
1579 regs += RING_REGS_SIZE;
1580 ring->irq = UINT_MAX;
1581 ring->irq_requested = false;
1582 ring->msi_timer_val = MSI_TIMER_VAL_MASK;
1583 ring->msi_count_threshold = 0x1;
1584 ida_init(&ring->requests_ida);
1585 memset(ring->requests, 0, sizeof(ring->requests));
1586 ring->bd_base = NULL;
1587 ring->bd_dma_base = 0;
1588 ring->cmpl_base = NULL;
1589 ring->cmpl_dma_base = 0;
		atomic_set(&ring->msg_send_count, 0);
		atomic_set(&ring->msg_cmpl_count, 0);
		spin_lock_init(&ring->lock);
1593 ring->last_pending_msg = NULL;
1594 ring->cmpl_read_offset = 0;
1595 }
1596
1597 /* FlexRM is capable of 40-bit physical addresses only */
1598 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
1599 if (ret) {
1600 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1601 if (ret)
1602 goto fail;
1603 }
1604
1605 /* Create DMA pool for ring BD memory */
1606 mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
1607 1 << RING_BD_ALIGN_ORDER, 0);
1608 if (!mbox->bd_pool) {
1609 ret = -ENOMEM;
1610 goto fail;
1611 }
1612
1613 /* Create DMA pool for ring completion memory */
1614 mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
1615 1 << RING_CMPL_ALIGN_ORDER, 0);
1616 if (!mbox->cmpl_pool) {
1617 ret = -ENOMEM;
1618 goto fail_destroy_bd_pool;
1619 }
1620
1621 /* Allocate platform MSIs for each ring */
1622 ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
1623 flexrm_mbox_msi_write);
1624 if (ret)
1625 goto fail_destroy_cmpl_pool;
1626
1627 /* Save alloced IRQ numbers for each ring */
1628 for_each_msi_entry(desc, dev) {
1629 ring = &mbox->rings[desc->platform.msi_index];
1630 ring->irq = desc->irq;
1631 }
1632
	/* Check availability of debugfs */
1634 if (!debugfs_initialized())
1635 goto skip_debugfs;
1636
1637 /* Create debugfs root entry */
1638 mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
1639 if (IS_ERR_OR_NULL(mbox->root)) {
1640 ret = PTR_ERR_OR_ZERO(mbox->root);
1641 goto fail_free_msis;
1642 }
1643
1644 /* Create debugfs config entry */
1645 mbox->config = debugfs_create_devm_seqfile(mbox->dev,
1646 "config", mbox->root,
1647 flexrm_debugfs_conf_show);
1648 if (IS_ERR_OR_NULL(mbox->config)) {
1649 ret = PTR_ERR_OR_ZERO(mbox->config);
1650 goto fail_free_debugfs_root;
1651 }
1652
1653 /* Create debugfs stats entry */
1654 mbox->stats = debugfs_create_devm_seqfile(mbox->dev,
1655 "stats", mbox->root,
1656 flexrm_debugfs_stats_show);
1657 if (IS_ERR_OR_NULL(mbox->stats)) {
1658 ret = PTR_ERR_OR_ZERO(mbox->stats);
1659 goto fail_free_debugfs_root;
1660 }
1661skip_debugfs:
1662
	/* Initialize mailbox controller */
1664 mbox->controller.txdone_irq = false;
1665 mbox->controller.txdone_poll = true;
1666 mbox->controller.txpoll_period = 1;
1667 mbox->controller.ops = &flexrm_mbox_chan_ops;
1668 mbox->controller.dev = dev;
1669 mbox->controller.num_chans = mbox->num_rings;
1670 mbox->controller.of_xlate = flexrm_mbox_of_xlate;
1671 mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
1672 sizeof(*mbox->controller.chans), GFP_KERNEL);
1673 if (!mbox->controller.chans) {
1674 ret = -ENOMEM;
		goto fail_free_debugfs_root;
	}
1677 for (index = 0; index < mbox->num_rings; index++)
1678 mbox->controller.chans[index].con_priv = &mbox->rings[index];
1679
1680 /* Register mailbox controller */
1681 ret = mbox_controller_register(&mbox->controller);
1682 if (ret)
		goto fail_free_debugfs_root;

1685 dev_info(dev, "registered flexrm mailbox with %d channels\n",
1686 mbox->controller.num_chans);
1687
1688 return 0;
1689
fail_free_debugfs_root:
	debugfs_remove_recursive(mbox->root);
fail_free_msis:
1693 platform_msi_domain_free_irqs(dev);
1694fail_destroy_cmpl_pool:
1695 dma_pool_destroy(mbox->cmpl_pool);
1696fail_destroy_bd_pool:
1697 dma_pool_destroy(mbox->bd_pool);
1698fail:
1699 return ret;
1700}
1701
1702static int flexrm_mbox_remove(struct platform_device *pdev)
1703{
1704 int index;
1705 struct device *dev = &pdev->dev;
1706 struct flexrm_ring *ring;
1707 struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
1708
1709 mbox_controller_unregister(&mbox->controller);
1710
	debugfs_remove_recursive(mbox->root);

	platform_msi_domain_free_irqs(dev);
1714
1715 dma_pool_destroy(mbox->cmpl_pool);
1716 dma_pool_destroy(mbox->bd_pool);
1717
1718 for (index = 0; index < mbox->num_rings; index++) {
1719 ring = &mbox->rings[index];
1720 ida_destroy(&ring->requests_ida);
1721 }
1722
1723 return 0;
1724}
1725
1726static const struct of_device_id flexrm_mbox_of_match[] = {
1727 { .compatible = "brcm,iproc-flexrm-mbox", },
1728 {},
1729};
1730MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
1731
1732static struct platform_driver flexrm_mbox_driver = {
1733 .driver = {
1734 .name = "brcm-flexrm-mbox",
1735 .of_match_table = flexrm_mbox_of_match,
1736 },
1737 .probe = flexrm_mbox_probe,
1738 .remove = flexrm_mbox_remove,
1739};
1740module_platform_driver(flexrm_mbox_driver);
1741
1742MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1743MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
1744MODULE_LICENSE("GPL v2");