1/* Broadcom FlexRM Mailbox Driver
2 *
3 * Copyright (C) 2017 Broadcom
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Each Broadcom FlexSparx4 offload engine is implemented as an
 10 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
11 * manager provides a set of rings which can be used to submit
12 * work to a FlexSparx4 offload engine.
13 *
14 * This driver creates a mailbox controller using a set of FlexRM
15 * rings where each mailbox channel represents a separate FlexRM ring.
16 */
17
18#include <asm/barrier.h>
19#include <asm/byteorder.h>
20#include <linux/delay.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/dmapool.h>
24#include <linux/err.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/mailbox_controller.h>
29#include <linux/mailbox_client.h>
30#include <linux/mailbox/brcm-message.h>
31#include <linux/module.h>
32#include <linux/msi.h>
33#include <linux/of_address.h>
34#include <linux/of_irq.h>
35#include <linux/platform_device.h>
36#include <linux/spinlock.h>
37
38/* ====== FlexRM register defines ===== */
39
40/* FlexRM configuration */
41#define RING_REGS_SIZE 0x10000
42#define RING_DESC_SIZE 8
43#define RING_DESC_INDEX(offset) \
44 ((offset) / RING_DESC_SIZE)
45#define RING_DESC_OFFSET(index) \
46 ((index) * RING_DESC_SIZE)
47#define RING_MAX_REQ_COUNT 1024
48#define RING_BD_ALIGN_ORDER 12
49#define RING_BD_ALIGN_CHECK(addr) \
50 (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
51#define RING_BD_TOGGLE_INVALID(offset) \
52 (((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
53#define RING_BD_TOGGLE_VALID(offset) \
54 (!RING_BD_TOGGLE_INVALID(offset))
55#define RING_BD_DESC_PER_REQ 32
56#define RING_BD_DESC_COUNT \
57 (RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
58#define RING_BD_SIZE \
59 (RING_BD_DESC_COUNT * RING_DESC_SIZE)
60#define RING_CMPL_ALIGN_ORDER 13
61#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
62#define RING_CMPL_SIZE \
63 (RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
64#define RING_VER_MAGIC 0x76303031
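/*
 * Worked example, derived from the defines above: each ring has
 * RING_BD_DESC_COUNT = 1024 * 32 = 32768 BD slots of 8 bytes each,
 * i.e. RING_BD_SIZE = 256 KiB of BD memory aligned to
 * 1 << RING_BD_ALIGN_ORDER = 4 KiB, plus a completion area of
 * RING_CMPL_DESC_COUNT = 1024 descriptors, i.e. RING_CMPL_SIZE =
 * 8 KiB aligned to 1 << RING_CMPL_ALIGN_ORDER = 8 KiB.
 */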
65
66/* Per-Ring register offsets */
67#define RING_VER 0x000
68#define RING_BD_START_ADDR 0x004
69#define RING_BD_READ_PTR 0x008
70#define RING_BD_WRITE_PTR 0x00c
71#define RING_BD_READ_PTR_DDR_LS 0x010
72#define RING_BD_READ_PTR_DDR_MS 0x014
73#define RING_CMPL_START_ADDR 0x018
74#define RING_CMPL_WRITE_PTR 0x01c
75#define RING_NUM_REQ_RECV_LS 0x020
76#define RING_NUM_REQ_RECV_MS 0x024
77#define RING_NUM_REQ_TRANS_LS 0x028
78#define RING_NUM_REQ_TRANS_MS 0x02c
79#define RING_NUM_REQ_OUTSTAND 0x030
80#define RING_CONTROL 0x034
81#define RING_FLUSH_DONE 0x038
82#define RING_MSI_ADDR_LS 0x03c
83#define RING_MSI_ADDR_MS 0x040
84#define RING_MSI_CONTROL 0x048
85#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
86#define RING_MSI_DATA_VALUE 0x064
87
88/* Register RING_BD_START_ADDR fields */
89#define BD_LAST_UPDATE_HW_SHIFT 28
90#define BD_LAST_UPDATE_HW_MASK 0x1
91#define BD_START_ADDR_VALUE(pa) \
92 ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
93#define BD_START_ADDR_DECODE(val) \
94 ((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
95
96/* Register RING_CMPL_START_ADDR fields */
97#define CMPL_START_ADDR_VALUE(pa) \
98 ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff))
99
100/* Register RING_CONTROL fields */
101#define CONTROL_MASK_DISABLE_CONTROL 12
102#define CONTROL_FLUSH_SHIFT 5
103#define CONTROL_ACTIVE_SHIFT 4
104#define CONTROL_RATE_ADAPT_MASK 0xf
105#define CONTROL_RATE_DYNAMIC 0x0
106#define CONTROL_RATE_FAST 0x8
107#define CONTROL_RATE_MEDIUM 0x9
108#define CONTROL_RATE_SLOW 0xa
109#define CONTROL_RATE_IDLE 0xb
110
111/* Register RING_FLUSH_DONE fields */
112#define FLUSH_DONE_MASK 0x1
113
114/* Register RING_MSI_CONTROL fields */
115#define MSI_TIMER_VAL_SHIFT 16
116#define MSI_TIMER_VAL_MASK 0xffff
117#define MSI_ENABLE_SHIFT 15
118#define MSI_ENABLE_MASK 0x1
119#define MSI_COUNT_SHIFT 0
120#define MSI_COUNT_MASK 0x3ff
121
122/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
123#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
124#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
125#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
126#define BD_READ_PTR_DDR_ENABLE_MASK 0x1
127
128/* ====== FlexRM ring descriptor defines ===== */
129
130/* Completion descriptor format */
131#define CMPL_OPAQUE_SHIFT 0
132#define CMPL_OPAQUE_MASK 0xffff
133#define CMPL_ENGINE_STATUS_SHIFT 16
134#define CMPL_ENGINE_STATUS_MASK 0xffff
135#define CMPL_DME_STATUS_SHIFT 32
136#define CMPL_DME_STATUS_MASK 0xffff
137#define CMPL_RM_STATUS_SHIFT 48
138#define CMPL_RM_STATUS_MASK 0xffff
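/*
 * For illustration, a 64-bit completion descriptor therefore decodes
 * as (per the shift/mask defines above):
 *
 *   bits [15:0]  OPAQUE        - request id echoed from the HEADER
 *   bits [31:16] ENGINE_STATUS - offload engine status
 *   bits [47:32] DME_STATUS    - DME error flags (DME_STATUS_* below)
 *   bits [63:48] RM_STATUS     - RM status code (RM_STATUS_CODE_* below)
 */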
139
140/* Completion DME status code */
141#define DME_STATUS_MEM_COR_ERR BIT(0)
142#define DME_STATUS_MEM_UCOR_ERR BIT(1)
143#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
144#define DME_STATUS_FIFO_OVERFLOW BIT(3)
145#define DME_STATUS_RRESP_ERR BIT(4)
146#define DME_STATUS_BRESP_ERR BIT(5)
147#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
148 DME_STATUS_MEM_UCOR_ERR | \
149 DME_STATUS_FIFO_UNDERFLOW | \
150 DME_STATUS_FIFO_OVERFLOW | \
151 DME_STATUS_RRESP_ERR | \
152 DME_STATUS_BRESP_ERR)
153
154/* Completion RM status code */
155#define RM_STATUS_CODE_SHIFT 0
156#define RM_STATUS_CODE_MASK 0x3ff
157#define RM_STATUS_CODE_GOOD 0x0
158#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff
159
160/* General descriptor format */
161#define DESC_TYPE_SHIFT 60
162#define DESC_TYPE_MASK 0xf
163#define DESC_PAYLOAD_SHIFT 0
164#define DESC_PAYLOAD_MASK 0x0fffffffffffffff
165
166/* Null descriptor format */
167#define NULL_TYPE 0
168#define NULL_TOGGLE_SHIFT 58
169#define NULL_TOGGLE_MASK 0x1
170
171/* Header descriptor format */
172#define HEADER_TYPE 1
173#define HEADER_TOGGLE_SHIFT 58
174#define HEADER_TOGGLE_MASK 0x1
175#define HEADER_ENDPKT_SHIFT 57
176#define HEADER_ENDPKT_MASK 0x1
177#define HEADER_STARTPKT_SHIFT 56
178#define HEADER_STARTPKT_MASK 0x1
179#define HEADER_BDCOUNT_SHIFT 36
180#define HEADER_BDCOUNT_MASK 0x1f
181#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
182#define HEADER_FLAGS_SHIFT 16
183#define HEADER_FLAGS_MASK 0xffff
184#define HEADER_OPAQUE_SHIFT 0
185#define HEADER_OPAQUE_MASK 0xffff
186
187/* Source (SRC) descriptor format */
188#define SRC_TYPE 2
189#define SRC_LENGTH_SHIFT 44
190#define SRC_LENGTH_MASK 0xffff
191#define SRC_ADDR_SHIFT 0
192#define SRC_ADDR_MASK 0x00000fffffffffff
193
194/* Destination (DST) descriptor format */
195#define DST_TYPE 3
196#define DST_LENGTH_SHIFT 44
197#define DST_LENGTH_MASK 0xffff
198#define DST_ADDR_SHIFT 0
199#define DST_ADDR_MASK 0x00000fffffffffff
200
201/* Immediate (IMM) descriptor format */
202#define IMM_TYPE 4
203#define IMM_DATA_SHIFT 0
204#define IMM_DATA_MASK 0x0fffffffffffffff
205
206/* Next pointer (NPTR) descriptor format */
207#define NPTR_TYPE 5
208#define NPTR_TOGGLE_SHIFT 58
209#define NPTR_TOGGLE_MASK 0x1
210#define NPTR_ADDR_SHIFT 0
211#define NPTR_ADDR_MASK 0x00000fffffffffff
212
213/* Mega source (MSRC) descriptor format */
214#define MSRC_TYPE 6
215#define MSRC_LENGTH_SHIFT 44
216#define MSRC_LENGTH_MASK 0xffff
217#define MSRC_ADDR_SHIFT 0
218#define MSRC_ADDR_MASK 0x00000fffffffffff
219
220/* Mega destination (MDST) descriptor format */
221#define MDST_TYPE 7
222#define MDST_LENGTH_SHIFT 44
223#define MDST_LENGTH_MASK 0xffff
224#define MDST_ADDR_SHIFT 0
225#define MDST_ADDR_MASK 0x00000fffffffffff
226
227/* Source with tlast (SRCT) descriptor format */
228#define SRCT_TYPE 8
229#define SRCT_LENGTH_SHIFT 44
230#define SRCT_LENGTH_MASK 0xffff
231#define SRCT_ADDR_SHIFT 0
232#define SRCT_ADDR_MASK 0x00000fffffffffff
233
234/* Destination with tlast (DSTT) descriptor format */
235#define DSTT_TYPE 9
236#define DSTT_LENGTH_SHIFT 44
237#define DSTT_LENGTH_MASK 0xffff
238#define DSTT_ADDR_SHIFT 0
239#define DSTT_ADDR_MASK 0x00000fffffffffff
240
241/* Immediate with tlast (IMMT) descriptor format */
242#define IMMT_TYPE 10
243#define IMMT_DATA_SHIFT 0
244#define IMMT_DATA_MASK 0x0fffffffffffffff
245
246/* Descriptor helper macros */
247#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
248#define DESC_ENC(_d, _v, _s, _m) \
249 do { \
250 (_d) &= ~((u64)(_m) << (_s)); \
251 (_d) |= (((u64)(_v) & (_m)) << (_s)); \
252 } while (0)
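/*
 * A quick illustration of the helpers above, using hypothetical values:
 * building a SRC descriptor for a 64-byte buffer at DMA address 0x1000
 * amounts to
 *
 *	u64 d = 0;
 *	DESC_ENC(d, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
 *	DESC_ENC(d, 64, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
 *	DESC_ENC(d, 0x1000, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
 *
 * which yields d = (2ULL << 60) | (64ULL << 44) | 0x1000, exactly what
 * flexrm_src_desc() below computes. DESC_DEC(d, SRC_LENGTH_SHIFT,
 * SRC_LENGTH_MASK) recovers the length field again.
 */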
253
254/* ====== FlexRM data structures ===== */
255
256struct flexrm_ring {
257 /* Unprotected members */
258 int num;
259 struct flexrm_mbox *mbox;
260 void __iomem *regs;
261 bool irq_requested;
262 unsigned int irq;
263 cpumask_t irq_aff_hint;
264 unsigned int msi_timer_val;
265 unsigned int msi_count_threshold;
266 struct ida requests_ida;
267 struct brcm_message *requests[RING_MAX_REQ_COUNT];
268 void *bd_base;
269 dma_addr_t bd_dma_base;
270 u32 bd_write_offset;
271 void *cmpl_base;
272 dma_addr_t cmpl_dma_base;
273 /* Protected members */
274 spinlock_t lock;
275 struct brcm_message *last_pending_msg;
276 u32 cmpl_read_offset;
277};
278
279struct flexrm_mbox {
280 struct device *dev;
281 void __iomem *regs;
282 u32 num_rings;
283 struct flexrm_ring *rings;
284 struct dma_pool *bd_pool;
285 struct dma_pool *cmpl_pool;
286 struct mbox_controller controller;
287};
288
289/* ====== FlexRM ring descriptor helper routines ===== */
290
291static u64 flexrm_read_desc(void *desc_ptr)
292{
293 return le64_to_cpu(*((u64 *)desc_ptr));
294}
295
296static void flexrm_write_desc(void *desc_ptr, u64 desc)
297{
298 *((u64 *)desc_ptr) = cpu_to_le64(desc);
299}
300
301static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
302{
303 return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
304}
305
306static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
307{
308 u32 status;
309
310 status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
311 CMPL_DME_STATUS_MASK);
312 if (status & DME_STATUS_ERROR_MASK)
313 return -EIO;
314
315 status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
316 CMPL_RM_STATUS_MASK);
317 status &= RM_STATUS_CODE_MASK;
318 if (status == RM_STATUS_CODE_AE_TIMEOUT)
319 return -ETIMEDOUT;
320
321 return 0;
322}
323
324static bool flexrm_is_next_table_desc(void *desc_ptr)
325{
326 u64 desc = flexrm_read_desc(desc_ptr);
327 u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
328
329 return (type == NPTR_TYPE) ? true : false;
330}
331
332static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
333{
334 u64 desc = 0;
335
336 DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
337 DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
338 DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
339
340 return desc;
341}
342
343static u64 flexrm_null_desc(u32 toggle)
344{
345 u64 desc = 0;
346
347 DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
348 DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
349
350 return desc;
351}
352
353static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
354{
355 u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;
356
 357 if (nhcnt % HEADER_BDCOUNT_MAX)
358 hcnt += 1;
359
360 return hcnt;
361}
362
363static void flexrm_flip_header_toogle(void *desc_ptr)
364{
365 u64 desc = flexrm_read_desc(desc_ptr);
366
367 if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
368 desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
369 else
370 desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
371
372 flexrm_write_desc(desc_ptr, desc);
373}
374
375static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
376 u32 bdcount, u32 flags, u32 opaque)
377{
378 u64 desc = 0;
379
380 DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
381 DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
382 DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
383 DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
384 DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
385 DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
386 DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
387
388 return desc;
389}
390
391static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
392 u64 desc, void **desc_ptr, u32 *toggle,
393 void *start_desc, void *end_desc)
394{
395 u64 d;
396 u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
397
398 /* Sanity check */
399 if (nhcnt <= nhpos)
400 return;
401
402 /*
 403 * Each request (or packet) starts with a HEADER descriptor followed
 404 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
 405 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
 406 * following a HEADER descriptor is given by the BDCOUNT field of
 407 * the HEADER descriptor. The max value of the BDCOUNT field is 31,
 408 * which means at most 31 non-HEADER descriptors can follow one
 409 * HEADER descriptor.
 410 *
 411 * In general use, the number of non-HEADER descriptors can easily
 412 * go beyond 31. To handle this, we have packet (or request)
 413 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
 414 *
 415 * To use packet extension, the first HEADER descriptor of a request
 416 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
 417 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
 418 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
 419 * TOGGLE bit of the first HEADER will be set to the invalid state
 420 * to ensure that FlexRM does not start fetching descriptors until
 421 * all descriptors are enqueued. The caller of this function must
 422 * flip the TOGGLE bit of the first HEADER after all descriptors
 423 * are enqueued.
424 */
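	/*
	 * For example, with HEADER_BDCOUNT_MAX = 31 and nhcnt = 70
	 * non-HEADER descriptors, this function emits HEADER descriptors
	 * at nhpos 0, 31 and 62:
	 *   nhpos 0:  STARTPKT=1, ENDPKT=0, BDCOUNT=31, TOGGLE inverted
	 *   nhpos 31: STARTPKT=0, ENDPKT=0, BDCOUNT=31
	 *   nhpos 62: STARTPKT=0, ENDPKT=1, BDCOUNT=8
	 * i.e. three HEADER descriptors for 70 non-HEADER descriptors.
	 */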
425
426 if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
427 /* Prepare the header descriptor */
428 nhavail = (nhcnt - nhpos);
429 _toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
430 _startpkt = (nhpos == 0) ? 0x1 : 0x0;
431 _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
 432 _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
 433 nhavail : HEADER_BDCOUNT_MAX;
438 d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
439 _bdcount, 0x0, reqid);
440
441 /* Write header descriptor */
442 flexrm_write_desc(*desc_ptr, d);
443
444 /* Point to next descriptor */
445 *desc_ptr += sizeof(desc);
446 if (*desc_ptr == end_desc)
447 *desc_ptr = start_desc;
448
449 /* Skip next pointer descriptors */
450 while (flexrm_is_next_table_desc(*desc_ptr)) {
451 *toggle = (*toggle) ? 0 : 1;
452 *desc_ptr += sizeof(desc);
453 if (*desc_ptr == end_desc)
454 *desc_ptr = start_desc;
455 }
456 }
457
458 /* Write desired descriptor */
459 flexrm_write_desc(*desc_ptr, desc);
460
461 /* Point to next descriptor */
462 *desc_ptr += sizeof(desc);
463 if (*desc_ptr == end_desc)
464 *desc_ptr = start_desc;
465
466 /* Skip next pointer descriptors */
467 while (flexrm_is_next_table_desc(*desc_ptr)) {
468 *toggle = (*toggle) ? 0 : 1;
469 *desc_ptr += sizeof(desc);
470 if (*desc_ptr == end_desc)
471 *desc_ptr = start_desc;
472 }
473}
474
475static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
476{
477 u64 desc = 0;
478
479 DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
480 DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
481 DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
482
483 return desc;
484}
485
486static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
487{
488 u64 desc = 0;
489
490 DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
491 DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
492 DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
493
494 return desc;
495}
496
497static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
498{
499 u64 desc = 0;
500
501 DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
502 DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
503 DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
504
505 return desc;
506}
507
508static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
509{
510 u64 desc = 0;
511
512 DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
513 DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
514 DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
515
516 return desc;
517}
518
519static u64 flexrm_imm_desc(u64 data)
520{
521 u64 desc = 0;
522
523 DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
524 DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
525
526 return desc;
527}
528
529static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
530{
531 u64 desc = 0;
532
533 DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
534 DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
535 DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
536
537 return desc;
538}
539
540static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
541{
542 u64 desc = 0;
543
544 DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
545 DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
546 DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
547
548 return desc;
549}
550
551static u64 flexrm_immt_desc(u64 data)
552{
553 u64 desc = 0;
554
555 DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
556 DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
557
558 return desc;
559}
560
561static bool flexrm_spu_sanity_check(struct brcm_message *msg)
562{
563 struct scatterlist *sg;
564
565 if (!msg->spu.src || !msg->spu.dst)
566 return false;
567 for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
568 if (sg->length & 0xf) {
569 if (sg->length > SRC_LENGTH_MASK)
570 return false;
571 } else {
572 if (sg->length > (MSRC_LENGTH_MASK * 16))
573 return false;
574 }
575 }
576 for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
577 if (sg->length & 0xf) {
578 if (sg->length > DST_LENGTH_MASK)
579 return false;
580 } else {
581 if (sg->length > (MDST_LENGTH_MASK * 16))
582 return false;
583 }
584 }
585
586 return true;
587}
588
589static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
590{
591 u32 cnt = 0;
592 unsigned int dst_target = 0;
593 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
594
595 while (src_sg || dst_sg) {
596 if (src_sg) {
597 cnt++;
598 dst_target = src_sg->length;
599 src_sg = sg_next(src_sg);
600 } else
601 dst_target = UINT_MAX;
602
603 while (dst_target && dst_sg) {
604 cnt++;
605 if (dst_sg->length < dst_target)
606 dst_target -= dst_sg->length;
607 else
608 dst_target = 0;
609 dst_sg = sg_next(dst_sg);
610 }
611 }
612
613 return cnt;
614}
615
616static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
617{
618 int rc;
619
620 rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
621 DMA_TO_DEVICE);
622 if (rc < 0)
623 return rc;
624
625 rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
626 DMA_FROM_DEVICE);
627 if (rc < 0) {
628 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
629 DMA_TO_DEVICE);
630 return rc;
631 }
632
633 return 0;
634}
635
636static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
637{
638 dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
639 DMA_FROM_DEVICE);
640 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
641 DMA_TO_DEVICE);
642}
643
644static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
645 u32 reqid, void *desc_ptr, u32 toggle,
646 void *start_desc, void *end_desc)
647{
648 u64 d;
649 u32 nhpos = 0;
650 void *orig_desc_ptr = desc_ptr;
651 unsigned int dst_target = 0;
652 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
653
654 while (src_sg || dst_sg) {
655 if (src_sg) {
656 if (sg_dma_len(src_sg) & 0xf)
657 d = flexrm_src_desc(sg_dma_address(src_sg),
658 sg_dma_len(src_sg));
659 else
660 d = flexrm_msrc_desc(sg_dma_address(src_sg),
661 sg_dma_len(src_sg)/16);
662 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
663 d, &desc_ptr, &toggle,
664 start_desc, end_desc);
665 nhpos++;
666 dst_target = sg_dma_len(src_sg);
667 src_sg = sg_next(src_sg);
668 } else
669 dst_target = UINT_MAX;
670
671 while (dst_target && dst_sg) {
672 if (sg_dma_len(dst_sg) & 0xf)
673 d = flexrm_dst_desc(sg_dma_address(dst_sg),
674 sg_dma_len(dst_sg));
675 else
676 d = flexrm_mdst_desc(sg_dma_address(dst_sg),
677 sg_dma_len(dst_sg)/16);
678 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
679 d, &desc_ptr, &toggle,
680 start_desc, end_desc);
681 nhpos++;
682 if (sg_dma_len(dst_sg) < dst_target)
683 dst_target -= sg_dma_len(dst_sg);
684 else
685 dst_target = 0;
686 dst_sg = sg_next(dst_sg);
687 }
688 }
689
690 /* Null descriptor with invalid toggle bit */
691 flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
692
693 /* Ensure that descriptors have been written to memory */
694 wmb();
695
696 /* Flip toggle bit in header */
697 flexrm_flip_header_toogle(orig_desc_ptr);
698
699 return desc_ptr;
700}
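/*
 * The resulting BD chain for an SPU message is thus: a HEADER (written
 * with an inverted, i.e. invalid, TOGGLE), one SRC/MSRC descriptor per
 * source scatterlist entry interleaved with the DST/MDST descriptors
 * that consume it (the MSRC/MDST forms are used when the length is a
 * multiple of 16 and carry length/16), further HEADERs every 31
 * non-HEADER descriptors, and a terminating NULL descriptor with an
 * invalid TOGGLE. Only after the wmb() above is the first HEADER's
 * TOGGLE flipped so that FlexRM starts fetching the request.
 */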
701
702static bool flexrm_sba_sanity_check(struct brcm_message *msg)
703{
704 u32 i;
705
706 if (!msg->sba.cmds || !msg->sba.cmds_count)
707 return false;
708
709 for (i = 0; i < msg->sba.cmds_count; i++) {
710 if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
711 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
712 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
713 return false;
714 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
715 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
716 return false;
717 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
718 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
719 return false;
720 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
721 (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
722 return false;
723 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
724 (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
725 return false;
726 }
727
728 return true;
729}
730
731static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
732{
733 u32 i, cnt;
734
735 cnt = 0;
736 for (i = 0; i < msg->sba.cmds_count; i++) {
737 cnt++;
738
739 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
740 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
741 cnt++;
742
743 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
744 cnt++;
745
746 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
747 cnt++;
748 }
749
750 return cnt;
751}
752
753static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
754 u32 reqid, void *desc_ptr, u32 toggle,
755 void *start_desc, void *end_desc)
756{
757 u64 d;
758 u32 i, nhpos = 0;
759 struct brcm_sba_command *c;
760 void *orig_desc_ptr = desc_ptr;
761
762 /* Convert SBA commands into descriptors */
763 for (i = 0; i < msg->sba.cmds_count; i++) {
764 c = &msg->sba.cmds[i];
765
766 if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
767 (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
768 /* Destination response descriptor */
769 d = flexrm_dst_desc(c->resp, c->resp_len);
770 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
771 d, &desc_ptr, &toggle,
772 start_desc, end_desc);
773 nhpos++;
774 } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
775 /* Destination response with tlast descriptor */
776 d = flexrm_dstt_desc(c->resp, c->resp_len);
777 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
778 d, &desc_ptr, &toggle,
779 start_desc, end_desc);
780 nhpos++;
781 }
782
783 if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
784 /* Destination with tlast descriptor */
785 d = flexrm_dstt_desc(c->data, c->data_len);
786 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
787 d, &desc_ptr, &toggle,
788 start_desc, end_desc);
789 nhpos++;
790 }
791
792 if (c->flags & BRCM_SBA_CMD_TYPE_B) {
793 /* Command as immediate descriptor */
794 d = flexrm_imm_desc(c->cmd);
795 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
796 d, &desc_ptr, &toggle,
797 start_desc, end_desc);
798 nhpos++;
799 } else {
800 /* Command as immediate descriptor with tlast */
801 d = flexrm_immt_desc(c->cmd);
802 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
803 d, &desc_ptr, &toggle,
804 start_desc, end_desc);
805 nhpos++;
806 }
807
808 if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
809 (c->flags & BRCM_SBA_CMD_TYPE_C)) {
810 /* Source with tlast descriptor */
811 d = flexrm_srct_desc(c->data, c->data_len);
812 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
813 d, &desc_ptr, &toggle,
814 start_desc, end_desc);
815 nhpos++;
816 }
817 }
818
819 /* Null descriptor with invalid toggle bit */
820 flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
821
822 /* Ensure that descriptors have been written to memory */
823 wmb();
824
825 /* Flip toggle bit in header */
826 flexrm_flip_header_toogle(orig_desc_ptr);
827
828 return desc_ptr;
829}
830
831static bool flexrm_sanity_check(struct brcm_message *msg)
832{
833 if (!msg)
834 return false;
835
836 switch (msg->type) {
837 case BRCM_MESSAGE_SPU:
838 return flexrm_spu_sanity_check(msg);
839 case BRCM_MESSAGE_SBA:
840 return flexrm_sba_sanity_check(msg);
841 default:
842 return false;
843 };
844}
845
846static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
847{
848 if (!msg)
849 return 0;
850
851 switch (msg->type) {
852 case BRCM_MESSAGE_SPU:
853 return flexrm_spu_estimate_nonheader_desc_count(msg);
854 case BRCM_MESSAGE_SBA:
855 return flexrm_sba_estimate_nonheader_desc_count(msg);
856 default:
857 return 0;
858 };
859}
860
861static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
862{
863 if (!dev || !msg)
864 return -EINVAL;
865
866 switch (msg->type) {
867 case BRCM_MESSAGE_SPU:
868 return flexrm_spu_dma_map(dev, msg);
869 default:
870 break;
871 }
872
873 return 0;
874}
875
876static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
877{
878 if (!dev || !msg)
879 return;
880
881 switch (msg->type) {
882 case BRCM_MESSAGE_SPU:
883 flexrm_spu_dma_unmap(dev, msg);
884 break;
885 default:
886 break;
887 }
888}
889
890static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
891 u32 reqid, void *desc_ptr, u32 toggle,
892 void *start_desc, void *end_desc)
893{
894 if (!msg || !desc_ptr || !start_desc || !end_desc)
895 return ERR_PTR(-ENOTSUPP);
896
897 if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
898 return ERR_PTR(-ERANGE);
899
900 switch (msg->type) {
901 case BRCM_MESSAGE_SPU:
902 return flexrm_spu_write_descs(msg, nhcnt, reqid,
903 desc_ptr, toggle,
904 start_desc, end_desc);
905 case BRCM_MESSAGE_SBA:
906 return flexrm_sba_write_descs(msg, nhcnt, reqid,
907 desc_ptr, toggle,
908 start_desc, end_desc);
909 default:
910 return ERR_PTR(-ENOTSUPP);
911 };
912}
913
914/* ====== FlexRM driver helper routines ===== */
915
916static int flexrm_new_request(struct flexrm_ring *ring,
917 struct brcm_message *batch_msg,
918 struct brcm_message *msg)
919{
920 void *next;
921 unsigned long flags;
922 u32 val, count, nhcnt;
923 u32 read_offset, write_offset;
924 bool exit_cleanup = false;
925 int ret = 0, reqid;
926
927 /* Do sanity check on message */
928 if (!flexrm_sanity_check(msg))
929 return -EIO;
930 msg->error = 0;
931
932 /* If no requests possible then save data pointer and goto done. */
933 reqid = ida_simple_get(&ring->requests_ida, 0,
934 RING_MAX_REQ_COUNT, GFP_KERNEL);
935 if (reqid < 0) {
936 spin_lock_irqsave(&ring->lock, flags);
937 if (batch_msg)
938 ring->last_pending_msg = batch_msg;
939 else
940 ring->last_pending_msg = msg;
941 spin_unlock_irqrestore(&ring->lock, flags);
942 return 0;
943 }
944 ring->requests[reqid] = msg;
945
946 /* Do DMA mappings for the message */
947 ret = flexrm_dma_map(ring->mbox->dev, msg);
948 if (ret < 0) {
949 ring->requests[reqid] = NULL;
950 ida_simple_remove(&ring->requests_ida, reqid);
951 return ret;
952 }
953
954 /* If last_pending_msg is already set then goto done with error */
955 spin_lock_irqsave(&ring->lock, flags);
956 if (ring->last_pending_msg)
957 ret = -ENOSPC;
958 spin_unlock_irqrestore(&ring->lock, flags);
959 if (ret < 0) {
960 dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num);
961 exit_cleanup = true;
962 goto exit;
963 }
964
965 /* Determine current HW BD read offset */
966 read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
967 val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
968 read_offset *= RING_DESC_SIZE;
969 read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
970
971 /*
 972 * Number of required descriptors = number of non-header descriptors +
973 * number of header descriptors +
974 * 1x null descriptor
975 */
976 nhcnt = flexrm_estimate_nonheader_desc_count(msg);
977 count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
978
979 /* Check for available descriptor space. */
980 write_offset = ring->bd_write_offset;
981 while (count) {
982 if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
983 count--;
984 write_offset += RING_DESC_SIZE;
985 if (write_offset == RING_BD_SIZE)
986 write_offset = 0x0;
987 if (write_offset == read_offset)
988 break;
989 }
990 if (count) {
991 spin_lock_irqsave(&ring->lock, flags);
992 if (batch_msg)
993 ring->last_pending_msg = batch_msg;
994 else
995 ring->last_pending_msg = msg;
996 spin_unlock_irqrestore(&ring->lock, flags);
997 ret = 0;
998 exit_cleanup = true;
999 goto exit;
1000 }
1001
1002 /* Write descriptors to ring */
1003 next = flexrm_write_descs(msg, nhcnt, reqid,
1004 ring->bd_base + ring->bd_write_offset,
1005 RING_BD_TOGGLE_VALID(ring->bd_write_offset),
1006 ring->bd_base, ring->bd_base + RING_BD_SIZE);
1007 if (IS_ERR(next)) {
1008 ret = PTR_ERR(next);
1009 exit_cleanup = true;
1010 goto exit;
1011 }
1012
1013 /* Save ring BD write offset */
1014 ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
1015
1016exit:
1017 /* Update error status in message */
1018 msg->error = ret;
1019
1020 /* Cleanup if we failed */
1021 if (exit_cleanup) {
1022 flexrm_dma_unmap(ring->mbox->dev, msg);
1023 ring->requests[reqid] = NULL;
1024 ida_simple_remove(&ring->requests_ida, reqid);
1025 }
1026
1027 return ret;
1028}
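/*
 * Note on flow control: when no request id is free, or the BD ring does
 * not have room for the estimated descriptor count, the message (or its
 * parent batch message) is parked in ring->last_pending_msg and 0 is
 * returned. Because the controller is registered with txdone_poll set,
 * the mailbox framework keeps polling flexrm_last_tx_done(), which
 * reports false until flexrm_process_completions() clears
 * last_pending_msg and re-queues the parked message via
 * mbox_send_message().
 */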
1029
1030static int flexrm_process_completions(struct flexrm_ring *ring)
1031{
1032 u64 desc;
1033 int err, count = 0;
1034 unsigned long flags;
1035 struct brcm_message *msg = NULL;
1036 u32 reqid, cmpl_read_offset, cmpl_write_offset;
1037 struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
1038
1039 spin_lock_irqsave(&ring->lock, flags);
1040
1041 /* Check last_pending_msg */
1042 if (ring->last_pending_msg) {
1043 msg = ring->last_pending_msg;
1044 ring->last_pending_msg = NULL;
1045 }
1046
1047 /*
1048 * Get current completion read and write offset
1049 *
 1050 * Note: We should read the completion write pointer at least once
 1051 * after we get an MSI interrupt because HW maintains an internal
 1052 * MSI status which will allow the next MSI interrupt only after
 1053 * the completion write pointer is read.
1054 */
1055 cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1056 cmpl_write_offset *= RING_DESC_SIZE;
1057 cmpl_read_offset = ring->cmpl_read_offset;
1058 ring->cmpl_read_offset = cmpl_write_offset;
1059
1060 spin_unlock_irqrestore(&ring->lock, flags);
1061
1062 /* If last_pending_msg was set then queue it back */
1063 if (msg)
1064 mbox_send_message(chan, msg);
1065
1066 /* For each completed request notify mailbox clients */
1067 reqid = 0;
1068 while (cmpl_read_offset != cmpl_write_offset) {
1069 /* Dequeue next completion descriptor */
1070 desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
1071
1072 /* Next read offset */
1073 cmpl_read_offset += RING_DESC_SIZE;
1074 if (cmpl_read_offset == RING_CMPL_SIZE)
1075 cmpl_read_offset = 0;
1076
1077 /* Decode error from completion descriptor */
1078 err = flexrm_cmpl_desc_to_error(desc);
1079 if (err < 0) {
1080 dev_warn(ring->mbox->dev,
1081 "got completion desc=0x%lx with error %d",
1082 (unsigned long)desc, err);
1083 }
1084
1085 /* Determine request id from completion descriptor */
1086 reqid = flexrm_cmpl_desc_to_reqid(desc);
1087
1088 /* Determine message pointer based on reqid */
1089 msg = ring->requests[reqid];
1090 if (!msg) {
1091 dev_warn(ring->mbox->dev,
1092 "null msg pointer for completion desc=0x%lx",
1093 (unsigned long)desc);
1094 continue;
1095 }
1096
1097 /* Release reqid for recycling */
1098 ring->requests[reqid] = NULL;
1099 ida_simple_remove(&ring->requests_ida, reqid);
1100
1101 /* Unmap DMA mappings */
1102 flexrm_dma_unmap(ring->mbox->dev, msg);
1103
1104 /* Give-back message to mailbox client */
1105 msg->error = err;
1106 mbox_chan_received_data(chan, msg);
1107
1108 /* Increment number of completions processed */
1109 count++;
1110 }
1111
1112 return count;
1113}
1114
1115/* ====== FlexRM interrupt handler ===== */
1116
1117static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
1118{
1119 /* We only have MSI for completions so just wakeup IRQ thread */
1120 /* Ring related errors will be informed via completion descriptors */
1121
1122 return IRQ_WAKE_THREAD;
1123}
1124
1125static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
1126{
1127 flexrm_process_completions(dev_id);
1128
1129 return IRQ_HANDLED;
1130}
1131
1132/* ====== FlexRM mailbox callbacks ===== */
1133
1134static int flexrm_send_data(struct mbox_chan *chan, void *data)
1135{
1136 int i, rc;
1137 struct flexrm_ring *ring = chan->con_priv;
1138 struct brcm_message *msg = data;
1139
1140 if (msg->type == BRCM_MESSAGE_BATCH) {
1141 for (i = msg->batch.msgs_queued;
1142 i < msg->batch.msgs_count; i++) {
1143 rc = flexrm_new_request(ring, msg,
1144 &msg->batch.msgs[i]);
1145 if (rc) {
1146 msg->error = rc;
1147 return rc;
1148 }
1149 msg->batch.msgs_queued++;
1150 }
1151 return 0;
1152 }
1153
1154 return flexrm_new_request(ring, NULL, data);
1155}
1156
1157static bool flexrm_peek_data(struct mbox_chan *chan)
1158{
1159 int cnt = flexrm_process_completions(chan->con_priv);
1160
1161 return (cnt > 0) ? true : false;
1162}
1163
1164static int flexrm_startup(struct mbox_chan *chan)
1165{
1166 u64 d;
1167 u32 val, off;
1168 int ret = 0;
1169 dma_addr_t next_addr;
1170 struct flexrm_ring *ring = chan->con_priv;
1171
1172 /* Allocate BD memory */
1173 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
1174 GFP_KERNEL, &ring->bd_dma_base);
1175 if (!ring->bd_base) {
1176 dev_err(ring->mbox->dev, "can't allocate BD memory\n");
1177 ret = -ENOMEM;
1178 goto fail;
1179 }
1180
1181 /* Configure next table pointer entries in BD memory */
1182 for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
1183 next_addr = off + RING_DESC_SIZE;
1184 if (next_addr == RING_BD_SIZE)
1185 next_addr = 0;
1186 next_addr += ring->bd_dma_base;
1187 if (RING_BD_ALIGN_CHECK(next_addr))
1188 d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
1189 next_addr);
1190 else
1191 d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
1192 flexrm_write_desc(ring->bd_base + off, d);
1193 }
1194
1195 /* Allocate completion memory */
1196 ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
1197 GFP_KERNEL, &ring->cmpl_dma_base);
1198 if (!ring->cmpl_base) {
1199 dev_err(ring->mbox->dev, "can't allocate completion memory\n");
1200 ret = -ENOMEM;
1201 goto fail_free_bd_memory;
1202 }
1203 memset(ring->cmpl_base, 0, RING_CMPL_SIZE);
1204
1205 /* Request IRQ */
1206 if (ring->irq == UINT_MAX) {
1207 dev_err(ring->mbox->dev, "ring IRQ not available\n");
1208 ret = -ENODEV;
1209 goto fail_free_cmpl_memory;
1210 }
1211 ret = request_threaded_irq(ring->irq,
1212 flexrm_irq_event,
1213 flexrm_irq_thread,
1214 0, dev_name(ring->mbox->dev), ring);
1215 if (ret) {
1216 dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
1217 goto fail_free_cmpl_memory;
1218 }
1219 ring->irq_requested = true;
1220
1221 /* Set IRQ affinity hint */
1222 ring->irq_aff_hint = CPU_MASK_NONE;
1223 val = ring->mbox->num_rings;
1224 val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
1225 cpumask_set_cpu((ring->num / val) % num_online_cpus(),
1226 &ring->irq_aff_hint);
1227 ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
1228 if (ret) {
1229 dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
1230 goto fail_free_irq;
1231 }
1232
1233 /* Disable/inactivate ring */
1234 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1235
1236 /* Program BD start address */
1237 val = BD_START_ADDR_VALUE(ring->bd_dma_base);
1238 writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
1239
1240 /* BD write pointer will be same as HW write pointer */
1241 ring->bd_write_offset =
1242 readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
1243 ring->bd_write_offset *= RING_DESC_SIZE;
1244
1245 /* Program completion start address */
1246 val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
1247 writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
1248
1249 /* Ensure last pending message is cleared */
1250 ring->last_pending_msg = NULL;
1251
1252 /* Completion read pointer will be same as HW write pointer */
1253 ring->cmpl_read_offset =
1254 readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1255 ring->cmpl_read_offset *= RING_DESC_SIZE;
1256
1257 /* Read ring Tx, Rx, and Outstanding counts to clear */
1258 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
1259 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
1260 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
1261 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
1262 readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
1263
1264 /* Configure RING_MSI_CONTROL */
1265 val = 0;
1266 val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
1267 val |= BIT(MSI_ENABLE_SHIFT);
1268 val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
1269 writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
1270
1271 /* Enable/activate ring */
1272 val = BIT(CONTROL_ACTIVE_SHIFT);
1273 writel_relaxed(val, ring->regs + RING_CONTROL);
1274
1275 return 0;
1276
1277fail_free_irq:
1278 free_irq(ring->irq, ring);
1279 ring->irq_requested = false;
1280fail_free_cmpl_memory:
1281 dma_pool_free(ring->mbox->cmpl_pool,
1282 ring->cmpl_base, ring->cmpl_dma_base);
1283 ring->cmpl_base = NULL;
1284fail_free_bd_memory:
1285 dma_pool_free(ring->mbox->bd_pool,
1286 ring->bd_base, ring->bd_dma_base);
1287 ring->bd_base = NULL;
1288fail:
1289 return ret;
1290}
1291
1292static void flexrm_shutdown(struct mbox_chan *chan)
1293{
1294 u32 reqid;
1295 unsigned int timeout;
1296 struct brcm_message *msg;
1297 struct flexrm_ring *ring = chan->con_priv;
1298
1299 /* Disable/inactivate ring */
1300 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1301
1302 /* Flush ring with timeout of 1s */
1303 timeout = 1000;
1304 writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
1305 ring->regs + RING_CONTROL);
1306 do {
1307 if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1308 FLUSH_DONE_MASK)
1309 break;
1310 mdelay(1);
1311 } while (timeout--);
1312
1313 /* Abort all in-flight requests */
1314 for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
1315 msg = ring->requests[reqid];
1316 if (!msg)
1317 continue;
1318
1319 /* Release reqid for recycling */
1320 ring->requests[reqid] = NULL;
1321 ida_simple_remove(&ring->requests_ida, reqid);
1322
1323 /* Unmap DMA mappings */
1324 flexrm_dma_unmap(ring->mbox->dev, msg);
1325
1326 /* Give-back message to mailbox client */
1327 msg->error = -EIO;
1328 mbox_chan_received_data(chan, msg);
1329 }
1330
1331 /* Release IRQ */
1332 if (ring->irq_requested) {
1333 irq_set_affinity_hint(ring->irq, NULL);
1334 free_irq(ring->irq, ring);
1335 ring->irq_requested = false;
1336 }
1337
1338 /* Free-up completion descriptor ring */
1339 if (ring->cmpl_base) {
1340 dma_pool_free(ring->mbox->cmpl_pool,
1341 ring->cmpl_base, ring->cmpl_dma_base);
1342 ring->cmpl_base = NULL;
1343 }
1344
1345 /* Free-up BD descriptor ring */
1346 if (ring->bd_base) {
1347 dma_pool_free(ring->mbox->bd_pool,
1348 ring->bd_base, ring->bd_dma_base);
1349 ring->bd_base = NULL;
1350 }
1351}
1352
1353static bool flexrm_last_tx_done(struct mbox_chan *chan)
1354{
1355 bool ret;
1356 unsigned long flags;
1357 struct flexrm_ring *ring = chan->con_priv;
1358
1359 spin_lock_irqsave(&ring->lock, flags);
1360 ret = (ring->last_pending_msg) ? false : true;
1361 spin_unlock_irqrestore(&ring->lock, flags);
1362
1363 return ret;
1364}
1365
1366static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
1367 .send_data = flexrm_send_data,
1368 .startup = flexrm_startup,
1369 .shutdown = flexrm_shutdown,
1370 .last_tx_done = flexrm_last_tx_done,
1371 .peek_data = flexrm_peek_data,
1372};
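/*
 * A minimal sketch of how a mailbox client could drive one of these
 * channels (hypothetical client code, not part of this driver; error
 * handling omitted):
 *
 *	struct mbox_client cl = {
 *		.dev = &client_pdev->dev,
 *		.rx_callback = client_rx_callback,
 *		.tx_block = false,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	struct brcm_message msg = { .type = BRCM_MESSAGE_SPU };
 *
 *	msg.spu.src = src_sgl;		// pre-built scatterlists
 *	msg.spu.dst = dst_sgl;
 *	mbox_send_message(chan, &msg);
 *
 * Completion is then reported through cl.rx_callback() with msg.error
 * holding the per-request status decoded by this driver.
 */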
1373
1374static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
1375 const struct of_phandle_args *pa)
1376{
1377 struct mbox_chan *chan;
1378 struct flexrm_ring *ring;
1379
1380 if (pa->args_count < 3)
1381 return ERR_PTR(-EINVAL);
1382
1383 if (pa->args[0] >= cntlr->num_chans)
1384 return ERR_PTR(-ENOENT);
1385
1386 if (pa->args[1] > MSI_COUNT_MASK)
1387 return ERR_PTR(-EINVAL);
1388
1389 if (pa->args[2] > MSI_TIMER_VAL_MASK)
1390 return ERR_PTR(-EINVAL);
1391
1392 chan = &cntlr->chans[pa->args[0]];
1393 ring = chan->con_priv;
1394 ring->msi_count_threshold = pa->args[1];
1395 ring->msi_timer_val = pa->args[2];
1396
1397 return chan;
1398}
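/*
 * The three mbox specifier cells consumed above are, in order: the ring
 * (channel) index, the MSI count threshold and the MSI timer value. A
 * client node would therefore reference a ring roughly as follows
 * (illustrative values):
 *
 *	mboxes = <&flexrm_mbox 0x0 0x1 0xffff>;
 *
 * i.e. ring 0, coalescing MSIs after 1 completion, with the maximum
 * MSI timer value.
 */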
1399
1400/* ====== FlexRM platform driver ===== */
1401
1402static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
1403{
1404 struct device *dev = msi_desc_to_dev(desc);
1405 struct flexrm_mbox *mbox = dev_get_drvdata(dev);
1406 struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
1407
1408 /* Configure per-Ring MSI registers */
1409 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
1410 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
1411 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
1412}
1413
1414static int flexrm_mbox_probe(struct platform_device *pdev)
1415{
1416 int index, ret = 0;
1417 void __iomem *regs;
1418 void __iomem *regs_end;
1419 struct msi_desc *desc;
1420 struct resource *iomem;
1421 struct flexrm_ring *ring;
1422 struct flexrm_mbox *mbox;
1423 struct device *dev = &pdev->dev;
1424
1425 /* Allocate driver mailbox struct */
1426 mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
1427 if (!mbox) {
1428 ret = -ENOMEM;
1429 goto fail;
1430 }
1431 mbox->dev = dev;
1432 platform_set_drvdata(pdev, mbox);
1433
1434 /* Get resource for registers */
1435 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1436 if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
1437 ret = -ENODEV;
1438 goto fail;
1439 }
1440
1441 /* Map registers of all rings */
1442 mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
1443 if (IS_ERR(mbox->regs)) {
1444 ret = PTR_ERR(mbox->regs);
1445 dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
1446 goto fail;
1447 }
1448 regs_end = mbox->regs + resource_size(iomem);
1449
1450 /* Scan and count available rings */
1451 mbox->num_rings = 0;
1452 for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
1453 if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
1454 mbox->num_rings++;
1455 }
1456 if (!mbox->num_rings) {
1457 ret = -ENODEV;
1458 goto fail;
1459 }
1460
1461 /* Allocate driver ring structs */
1462 ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
1463 if (!ring) {
1464 ret = -ENOMEM;
1465 goto fail;
1466 }
1467 mbox->rings = ring;
1468
1469 /* Initialize members of driver ring structs */
1470 regs = mbox->regs;
1471 for (index = 0; index < mbox->num_rings; index++) {
1472 ring = &mbox->rings[index];
1473 ring->num = index;
1474 ring->mbox = mbox;
1475 while ((regs < regs_end) &&
1476 (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
1477 regs += RING_REGS_SIZE;
1478 if (regs_end <= regs) {
1479 ret = -ENODEV;
1480 goto fail;
1481 }
1482 ring->regs = regs;
1483 regs += RING_REGS_SIZE;
1484 ring->irq = UINT_MAX;
1485 ring->irq_requested = false;
1486 ring->msi_timer_val = MSI_TIMER_VAL_MASK;
1487 ring->msi_count_threshold = 0x1;
1488 ida_init(&ring->requests_ida);
1489 memset(ring->requests, 0, sizeof(ring->requests));
1490 ring->bd_base = NULL;
1491 ring->bd_dma_base = 0;
1492 ring->cmpl_base = NULL;
1493 ring->cmpl_dma_base = 0;
1494 spin_lock_init(&ring->lock);
1495 ring->last_pending_msg = NULL;
1496 ring->cmpl_read_offset = 0;
1497 }
1498
1499 /* FlexRM is capable of 40-bit physical addresses only */
1500 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
1501 if (ret) {
1502 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1503 if (ret)
1504 goto fail;
1505 }
1506
1507 /* Create DMA pool for ring BD memory */
1508 mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
1509 1 << RING_BD_ALIGN_ORDER, 0);
1510 if (!mbox->bd_pool) {
1511 ret = -ENOMEM;
1512 goto fail;
1513 }
1514
1515 /* Create DMA pool for ring completion memory */
1516 mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
1517 1 << RING_CMPL_ALIGN_ORDER, 0);
1518 if (!mbox->cmpl_pool) {
1519 ret = -ENOMEM;
1520 goto fail_destroy_bd_pool;
1521 }
1522
1523 /* Allocate platform MSIs for each ring */
1524 ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
1525 flexrm_mbox_msi_write);
1526 if (ret)
1527 goto fail_destroy_cmpl_pool;
1528
1529 /* Save alloced IRQ numbers for each ring */
1530 for_each_msi_entry(desc, dev) {
1531 ring = &mbox->rings[desc->platform.msi_index];
1532 ring->irq = desc->irq;
1533 }
1534
1535 /* Initialize mailbox controller */
1536 mbox->controller.txdone_irq = false;
1537 mbox->controller.txdone_poll = true;
1538 mbox->controller.txpoll_period = 1;
1539 mbox->controller.ops = &flexrm_mbox_chan_ops;
1540 mbox->controller.dev = dev;
1541 mbox->controller.num_chans = mbox->num_rings;
1542 mbox->controller.of_xlate = flexrm_mbox_of_xlate;
1543 mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
1544 sizeof(*mbox->controller.chans), GFP_KERNEL);
1545 if (!mbox->controller.chans) {
1546 ret = -ENOMEM;
1547 goto fail_free_msis;
1548 }
1549 for (index = 0; index < mbox->num_rings; index++)
1550 mbox->controller.chans[index].con_priv = &mbox->rings[index];
1551
1552 /* Register mailbox controller */
1553 ret = mbox_controller_register(&mbox->controller);
1554 if (ret)
1555 goto fail_free_msis;
1556
1557 dev_info(dev, "registered flexrm mailbox with %d channels\n",
1558 mbox->controller.num_chans);
1559
1560 return 0;
1561
1562fail_free_msis:
1563 platform_msi_domain_free_irqs(dev);
1564fail_destroy_cmpl_pool:
1565 dma_pool_destroy(mbox->cmpl_pool);
1566fail_destroy_bd_pool:
1567 dma_pool_destroy(mbox->bd_pool);
1568fail:
1569 return ret;
1570}
1571
1572static int flexrm_mbox_remove(struct platform_device *pdev)
1573{
1574 int index;
1575 struct device *dev = &pdev->dev;
1576 struct flexrm_ring *ring;
1577 struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
1578
1579 mbox_controller_unregister(&mbox->controller);
1580
1581 platform_msi_domain_free_irqs(dev);
1582
1583 dma_pool_destroy(mbox->cmpl_pool);
1584 dma_pool_destroy(mbox->bd_pool);
1585
1586 for (index = 0; index < mbox->num_rings; index++) {
1587 ring = &mbox->rings[index];
1588 ida_destroy(&ring->requests_ida);
1589 }
1590
1591 return 0;
1592}
1593
1594static const struct of_device_id flexrm_mbox_of_match[] = {
1595 { .compatible = "brcm,iproc-flexrm-mbox", },
1596 {},
1597};
1598MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
1599
1600static struct platform_driver flexrm_mbox_driver = {
1601 .driver = {
1602 .name = "brcm-flexrm-mbox",
1603 .of_match_table = flexrm_mbox_of_match,
1604 },
1605 .probe = flexrm_mbox_probe,
1606 .remove = flexrm_mbox_remove,
1607};
1608module_platform_driver(flexrm_mbox_driver);
1609
1610MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1611MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
1612MODULE_LICENSE("GPL v2");