/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h> /* For dev_err */
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <linux/io.h>

#include <soc/qcom/tcs.h>

#include <dt-bindings/soc/qcom,tcs-mbox.h>

#include "mailbox.h"

#define CREATE_TRACE_POINTS
#include <trace/events/rpmh.h>

#define MAX_CMDS_PER_TCS		16
#define MAX_TCS_PER_TYPE		3
#define MAX_TCS_SLOTS			(MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)

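/*
 * Each TCS occupies a 672-byte stride in the DRV register space and each
 * command within a TCS a 20-byte stride, so register 'reg' for command 'n'
 * of TCS 'm' lives at: base + reg + (672 * m) + (20 * n).
 */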
#define TCS_DRV_TCS_OFFSET		672
#define TCS_DRV_CMD_OFFSET		20

/* DRV Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Register offsets */
#define TCS_DRV_IRQ_ENABLE		0x00
#define TCS_DRV_IRQ_STATUS		0x04
#define TCS_DRV_IRQ_CLEAR		0x08
#define TCS_DRV_CMD_WAIT_FOR_CMPL	0x10
#define TCS_DRV_CONTROL			0x14
#define TCS_DRV_STATUS			0x18
#define TCS_DRV_CMD_ENABLE		0x1C
#define TCS_DRV_CMD_MSGID		0x30
#define TCS_DRV_CMD_ADDR		0x34
#define TCS_DRV_CMD_DATA		0x38
#define TCS_DRV_CMD_STATUS		0x3C
#define TCS_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/* Control/Hidden TCS */
#define TCS_HIDDEN_MAX_SLOTS		3
#define TCS_HIDDEN_CMD0_DRV_ADDR	0x34
#define TCS_HIDDEN_CMD0_DRV_DATA	0x38
#define TCS_HIDDEN_CMD_SHIFT		0x08

#define TCS_TYPE_NR			4
#define TCS_MBOX_TOUT_MS		2000
#define MAX_POOL_SIZE			(MAX_TCS_PER_TYPE * TCS_TYPE_NR)

struct tcs_drv;

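/* Tracks the state of a request while it is in flight on a TCS */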
struct tcs_response {
	struct tcs_drv *drv;
	struct mbox_chan *chan;
	struct tcs_mbox_msg *msg;
	u32 m; /* m-th TCS */
	struct tasklet_struct tasklet;
	struct delayed_work dwork;
	int err;
};

struct tcs_response_pool {
	struct tcs_response *resp;
	spinlock_t lock;
	DECLARE_BITMAP(avail, MAX_POOL_SIZE);
};

/* One per TCS type of a controller */
struct tcs_mbox {
	struct tcs_drv *drv;
	u32 *cmd_addr;
	int type;
	u32 tcs_mask;
	u32 tcs_offset;
	int num_tcs;
	int ncpt; /* num cmds per tcs */
	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
	spinlock_t tcs_lock; /* TCS type lock */
	spinlock_t tcs_m_lock[MAX_TCS_PER_TYPE];
	struct tcs_response *resp[MAX_TCS_PER_TYPE];
};

/* One per MBOX controller */
struct tcs_drv {
	void __iomem *base; /* start address of the RSC's registers */
	void __iomem *reg_base; /* start address for DRV specific register */
	int drv_id;
	struct platform_device *pdev;
	struct mbox_controller mbox;
	struct tcs_mbox tcs[TCS_TYPE_NR];
	int num_assigned;
	int num_tcs;
	struct workqueue_struct *wq;
	struct tcs_response_pool *resp_pool;
};

static void tcs_notify_tx_done(unsigned long data);
static void tcs_notify_timeout(struct work_struct *work);

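/**
 * tcs_response_pool_init: Allocate the fixed pool of response objects
 * used to track requests in flight on this DRV.
 */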
static int tcs_response_pool_init(struct tcs_drv *drv)
{
	struct tcs_response_pool *pool;
	int i;

	pool = devm_kzalloc(&drv->pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	pool->resp = devm_kzalloc(&drv->pdev->dev, sizeof(*pool->resp) *
				MAX_POOL_SIZE, GFP_KERNEL);
	if (!pool->resp)
		return -ENOMEM;

	for (i = 0; i < MAX_POOL_SIZE; i++) {
		tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
						(unsigned long) &pool->resp[i]);
		INIT_DELAYED_WORK(&pool->resp[i].dwork,
						tcs_notify_timeout);
	}

	spin_lock_init(&pool->lock);
	drv->resp_pool = pool;

	return 0;
}

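/*
 * Claim a free response object from the pool under the pool lock.
 * Returns ERR_PTR(-ENOMEM) when all entries are in flight.
 */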
static struct tcs_response *get_response_from_pool(struct tcs_drv *drv)
{
	struct tcs_response_pool *pool = drv->resp_pool;
	struct tcs_response *resp = ERR_PTR(-ENOMEM);
	unsigned long flags;
	int pos;

	spin_lock_irqsave(&pool->lock, flags);
	pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
	if (pos != MAX_POOL_SIZE) {
		bitmap_set(pool->avail, pos, 1);
		resp = &pool->resp[pos];
		memset(resp, 0, sizeof(*resp));
		tasklet_init(&resp->tasklet, tcs_notify_tx_done,
						(unsigned long) resp);
		INIT_DELAYED_WORK(&resp->dwork, tcs_notify_timeout);
		resp->drv = drv;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return resp;
}

static void free_response_to_pool(struct tcs_response *resp)
{
	struct tcs_response_pool *pool = resp->drv->resp_pool;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	i = resp - pool->resp;
	bitmap_clear(pool->avail, i, 1);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static inline u32 read_drv_config(void __iomem *base)
{
	return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
}

static inline u32 read_tcs_reg(void __iomem *base, int reg, int m, int n)
{
	return le32_to_cpu(readl_relaxed(base + reg +
			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n));
}

static inline void write_tcs_reg(void __iomem *base, int reg, int m, int n,
				u32 data)
{
	writel_relaxed(cpu_to_le32(data), base + reg +
			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n);
}

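/*
 * Write-then-verify helper: re-write until the value reads back, used for
 * the DRV_CONTROL updates that the trigger sequence treats as a HW
 * requirement (see __tcs_buffer_write()).
 */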
static inline void write_tcs_reg_sync(void __iomem *base, int reg, int m, int n,
				u32 data)
{
	do {
		write_tcs_reg(base, reg, m, n, data);
		if (data == read_tcs_reg(base, reg, m, n))
			break;
		cpu_relax();
	} while (1);
}

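/* The TCS STATUS register reads non-zero when the TCS is idle (free) */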
static inline bool tcs_is_free(void __iomem *base, int m)
{
	return read_tcs_reg(base, TCS_DRV_STATUS, m, 0);
}

static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
{
	struct tcs_mbox *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->tcs_mask & BIT(m))
			break;
	}

	if (i == TCS_TYPE_NR)
		tcs = NULL;

	return tcs;
}

static inline struct tcs_mbox *get_tcs_of_type(struct tcs_drv *drv, int type)
{
	int i;
	struct tcs_mbox *tcs;

	for (i = 0; i < TCS_TYPE_NR; i++)
		if (type == drv->tcs[i].type)
			break;

	if (i == TCS_TYPE_NR)
		return ERR_PTR(-EINVAL);

	tcs = &drv->tcs[i];
	if (!tcs->num_tcs)
		return ERR_PTR(-EINVAL);

	return tcs;
}

static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
						struct tcs_mbox_msg *msg)
{
	int type = -1;

	/* Which box are we dropping this in and do we trigger the TCS */
	switch (msg->state) {
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_AWAKE_STATE:
		/*
		 * Awake state is only used when the DRV has no separate
		 * TCS for ACTIVE requests. Switch to WAKE TCS to send
		 * active votes. Otherwise, the caller should be explicit
		 * about the state.
		 */
		if (IS_ERR(get_tcs_of_type(drv, ACTIVE_TCS)))
			type = WAKE_TCS;
		break;
	}

	if (msg->is_read)
		type = ACTIVE_TCS;

	if (type < 0)
		return ERR_PTR(-EINVAL);

	return get_tcs_of_type(drv, type);
}

static inline struct tcs_response *get_tcs_response(struct tcs_drv *drv, int m)
{
	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);

	return tcs ? tcs->resp[m - tcs->tcs_offset] : NULL;
}

static inline void send_tcs_response(struct tcs_response *resp)
{
	tasklet_schedule(&resp->tasklet);
}

static inline void schedule_tcs_err_response(struct tcs_response *resp)
{
	schedule_delayed_work(&resp->dwork, msecs_to_jiffies(TCS_MBOX_TOUT_MS));
}

/**
 * tcs_irq_handler: TX Done / Recv data handler
 */
static irqreturn_t tcs_irq_handler(int irq, void *p)
{
	struct tcs_drv *drv = p;
	void __iomem *base = drv->reg_base;
	int m, i;
	u32 irq_status, sts;
	struct tcs_mbox *tcs;
	struct tcs_response *resp;
	u32 irq_clear = 0;
	u32 data;

	/* Know which TCSes were triggered */
	irq_status = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);

	for (m = 0; m < drv->num_tcs; m++) {
		if (!(irq_status & BIT(m)))
			continue;

		/* Find the TCS that triggered */
		resp = get_tcs_response(drv, m);
		if (!resp) {
			pr_err("No resp request for TCS-%d\n", m);
			continue;
		}

		cancel_delayed_work(&resp->dwork);

		/* Clear the AMC mode for non-ACTIVE TCSes */
		tcs = get_tcs_from_index(drv, m);
		if (!tcs) {
			pr_err("TCS-%d doesn't exist in DRV\n", m);
			continue;
		}
		if (tcs->type != ACTIVE_TCS) {
			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
			data &= ~TCS_AMC_MODE_ENABLE;
			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
		} else {
			/* Clear the enable bit for the commands */
			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
		}

		/* Check if all commands were completed */
		resp->err = 0;
		for (i = 0; i < resp->msg->num_payload; i++) {
			sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
			if (!(sts & CMD_STATUS_ISSUED) ||
				(resp->msg->is_complete &&
					!(sts & CMD_STATUS_COMPL)))
				resp->err = -EIO;
		}

		/* Check for response if this was a read request */
		if (resp->msg->is_read) {
			/* Return the read data in the same request payload */
			data = read_tcs_reg(base, TCS_DRV_CMD_RESP_DATA, m, 0);
			resp->msg->payload[0].data = data;
			mbox_chan_received_data(resp->chan, resp->msg);
		}

		trace_rpmh_notify_irq(m, resp->msg->payload[0].addr, resp->err);

		/* Notify the client that this request is completed. */
		send_tcs_response(resp);
		irq_clear |= BIT(m);
	}

	/* Clear the TCS IRQ status */
	write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, irq_clear);

	return IRQ_HANDLED;
}

static inline void mbox_notify_tx_done(struct mbox_chan *chan,
				struct tcs_mbox_msg *msg, int m, int err)
{
	trace_rpmh_notify(m, msg->payload[0].addr, err);
	mbox_chan_txdone(chan, err);
}

/**
 * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
 */
static void tcs_notify_tx_done(unsigned long data)
{
	struct tcs_response *resp = (struct tcs_response *) data;
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	int err = resp->err;
	int m = resp->m;

	free_response_to_pool(resp);
	mbox_notify_tx_done(chan, msg, m, err);
}

/**
 * tcs_notify_timeout: TX Done for requests that do trigger TCS, but
 * we do not get a response IRQ back.
 */
static void tcs_notify_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tcs_response *resp = container_of(dwork,
					struct tcs_response, dwork);
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	struct tcs_drv *drv = resp->drv;
	int m = resp->m;
	int err = -EIO;

	/*
	 * In case the RPMH resource fails to respond to the completion
	 * request, the TCS would be blocked forever waiting on the response.
	 * There is no way to recover from this case.
	 */
	if (!tcs_is_free(drv->reg_base, m)) {
		bool pending = false;
		struct tcs_cmd *cmd;
		int i;
		u32 addr;

		for (i = 0; i < msg->num_payload; i++) {
			cmd = &msg->payload[i];
			addr = read_tcs_reg(drv->reg_base, TCS_DRV_CMD_ADDR,
						m, i);
			pending = (cmd->addr == addr);
			if (pending)
				break;
		}
		if (pending) {
			pr_err("TCS-%d blocked waiting for RPMH to respond.\n",
					m);
			for (i = 0; i < msg->num_payload; i++)
				pr_err("Addr: 0x%x Data: 0x%x\n",
						msg->payload[i].addr,
						msg->payload[i].data);
			BUG();
		}
	}

	free_response_to_pool(resp);
	mbox_notify_tx_done(chan, msg, -1, err);
}

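/*
 * Program the message into command slots [n, n + num_payload) of TCS 'm'
 * and, when 'trigger' is set, fire the AMC to send it immediately.
 */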
static void __tcs_buffer_write(void __iomem *base, int d, int m, int n,
			struct tcs_mbox_msg *msg, bool trigger)
{
	u32 cmd_msgid = 0;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	u32 enable = TCS_AMC_MODE_ENABLE;
	struct tcs_cmd *cmd;
	int i;

	/* The command set is homogeneous, i.e. pure read or pure write */
	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= (msg->is_complete) ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= (!msg->is_read) ? CMD_MSGID_WRITE : 0;

	/* Read the send-after-prev complete flag for those already in TCS */
	cmd_complete = read_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0);

	for (i = 0; i < msg->num_payload; i++) {
		cmd = &msg->payload[i];
		cmd_enable |= BIT(n + i);
		cmd_complete |= cmd->complete << (n + i);
		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, cmd_msgid);
		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
		trace_rpmh_send_msg(base, m, n + i,
				cmd_msgid, cmd->addr, cmd->data, cmd->complete);
	}

	/* Write the send-after-prev completion bits for the batch */
	write_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0, cmd_complete);

	/* Enable the new commands in TCS */
	cmd_enable |= read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);

	if (trigger) {
		/* Clear pending interrupt bits for this TCS, OK to not lock */
		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
		/* HW req: Clear the DRV_CONTROL and enable TCS again */
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
		/* Enable the AMC mode on the TCS */
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
	}
}

/**
 * tcs_drv_is_idle: Check if any of the AMCs are busy.
 *
 * @mbox: The mailbox controller.
 *
 * Returns true if the AMCs are not engaged or absent.
 */
static bool tcs_drv_is_idle(struct mbox_controller *mbox)
{
	int m;
	struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs = get_tcs_of_type(drv, ACTIVE_TCS);

	/* Check for WAKE TCS if there are no ACTIVE TCS */
	if (IS_ERR(tcs))
		tcs = get_tcs_of_type(drv, WAKE_TCS);
	if (IS_ERR(tcs))
		return true;

	for (m = tcs->tcs_offset; m < tcs->tcs_offset + tcs->num_tcs; m++)
		if (!tcs_is_free(drv->reg_base, m))
			return false;

	return true;
}

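/*
 * Spin until no in-flight TCS of this type still carries any of the
 * addresses in 'msg', so two triggers never race on the same resource.
 */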
static void wait_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
						struct tcs_mbox_msg *msg)
{
	u32 curr_enabled;
	int i, j, k, m;
	bool is_free;

	do {
		is_free = true;
		for (i = 0; i < tcs->num_tcs; i++) {
			m = tcs->tcs_offset + i;
			if (tcs_is_free(drv->reg_base, m))
				continue;
			curr_enabled = read_tcs_reg(drv->reg_base,
						TCS_DRV_CMD_ENABLE, m, 0);
			for (j = 0; j < msg->num_payload; j++) {
				for (k = 0; k < tcs->ncpt; k++) {
					if (!(curr_enabled & BIT(k)))
						continue;
					if (tcs->cmd_addr[i * tcs->ncpt + k] ==
							msg->payload[j].addr) {
						is_free = false;
						goto retry;
					}
				}
			}
		}
retry:
		if (!is_free)
			cpu_relax();
	} while (!is_free);
}

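/*
 * Return the base slot of the first idle AMC in an ACTIVE TCS, spinning
 * until one frees up.
 */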
static int find_free_tcs(struct tcs_mbox *tcs)
{
	int slot, m = 0;

	/* Loop until we find a free AMC */
	do {
		if (tcs_is_free(tcs->drv->reg_base, tcs->tcs_offset + m)) {
			slot = m * tcs->ncpt;
			break;
		}
		if (++m >= tcs->num_tcs)
			m = 0;
		cpu_relax();
	} while (1);

	return slot;
}

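/*
 * Look for an identical command sequence already staged in this TCS type.
 * Returns its starting slot, or -1 when the message is not cached.
 */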
static int find_match(struct tcs_mbox *tcs, struct tcs_cmd *cmd, int len)
{
	bool found = false;
	int i = 0, j;

	/* Check for already cached commands */
	while ((i = find_next_bit(tcs->slots, MAX_TCS_SLOTS, i)) <
			MAX_TCS_SLOTS) {
		if (tcs->cmd_addr[i] != cmd[0].addr) {
			i++;
			continue;
		}
		/* Sanity check to ensure the sequence is the same */
		for (j = 1; j < len; j++) {
			if (tcs->cmd_addr[i + j] != cmd[j].addr) {
				WARN(1, "Message does not match previous sequence.\n");
				return -EINVAL;
			}
		}
		found = true;
		break;
	}

	return found ? i : -1;
}

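/*
 * Find a contiguous run of free slots large enough for the message,
 * without letting it straddle a TCS boundary of 'ncpt' commands.
 */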
static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg)
{
	int slot;
	int n = 0;

	/* For active requests find the first free AMC. */
	if (tcs->type == ACTIVE_TCS)
		return find_free_tcs(tcs);

	/* Find if we already have the msg in our TCS */
	slot = find_match(tcs, msg->payload, msg->num_payload);
	if (slot >= 0)
		return slot;

	/* Do over, until we can fit the full payload in a TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
					n, msg->num_payload, 0);
		if (slot >= MAX_TCS_SLOTS)
			break;
		n += tcs->ncpt;
	} while (slot + msg->num_payload - 1 >= n);

	return (slot < MAX_TCS_SLOTS) ? slot : -ENOMEM;
}

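/*
 * Reserve a response object for TCS 'm' so that the IRQ handler can look
 * it up and complete the client request.
 */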
static struct tcs_response *setup_response(struct tcs_mbox *tcs,
		struct mbox_chan *chan, struct tcs_mbox_msg *msg, int m)
{
	struct tcs_response *resp = get_response_from_pool(tcs->drv);

	if (IS_ERR(resp))
		return resp;

	if (m < tcs->tcs_offset)
		return ERR_PTR(-EINVAL);

	tcs->resp[m - tcs->tcs_offset] = resp;
	resp->msg = msg;
	resp->chan = chan;
	resp->m = m;
	resp->err = 0;

	return resp;
}

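/**
 * tcs_mbox_write: Post a message to free slots in a suitable TCS and,
 * for triggered (active) requests, fire the AMC and arm a timeout.
 */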
static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
				bool trigger)
{
	const struct device *dev = chan->cl->dev;
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	int d = drv->drv_id;
	struct tcs_mbox *tcs;
	int i, slot, offset, m, n;
	struct tcs_response *resp = NULL;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* Identify the sequential slots that we can write to */
	spin_lock(&tcs->tcs_lock);
	slot = find_slots(tcs, msg);
	if (slot < 0) {
		dev_err(dev, "No TCS slot found.\n");
		spin_unlock(&tcs->tcs_lock);
		return slot;
	}
	/* Mark the slots as in-use, before we unlock */
	if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
		bitmap_set(tcs->slots, slot, msg->num_payload);

	/* Copy the addresses of the resources over to the slots */
	for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
		tcs->cmd_addr[slot + i] = msg->payload[i].addr;

	if (trigger) {
		resp = setup_response(tcs, chan, msg,
				slot / tcs->ncpt + tcs->tcs_offset);
		if (IS_ERR(resp)) {
			spin_unlock(&tcs->tcs_lock);
			return PTR_ERR(resp);
		}
	}

	spin_unlock(&tcs->tcs_lock);

	/*
	 * Find the TCS corresponding to the slot and start writing.
	 * Break down 'slot' into a 'n' position in the 'm'th TCS.
	 */
	offset = slot / tcs->ncpt;
	m = offset + tcs->tcs_offset;
	n = slot % tcs->ncpt;

	spin_lock(&tcs->tcs_m_lock[offset]);
	if (trigger) {
		/* Block, if we have an address from the msg in flight */
		wait_for_req_inflight(drv, tcs, msg);
		/* If the TCS is busy there is nothing to do but spin wait */
		while (!tcs_is_free(drv->reg_base, m))
			cpu_relax();
	}

	/* Write to the TCS or AMC */
	__tcs_buffer_write(drv->reg_base, d, m, n, msg, trigger);

	/* Schedule a timeout response, in case there is no actual response */
	if (trigger)
		schedule_tcs_err_response(resp);

	spin_unlock(&tcs->tcs_m_lock[offset]);

	return 0;
}

/**
 * chan_tcs_write: Validate the incoming message and write to the
 * appropriate TCS block.
 *
 * @chan: the MBOX channel
 * @data: the tcs_mbox_msg*
 *
 * Returns 0 in the normal case; errors from validation or from the send
 * are reported back to the client asynchronously through the TX-done
 * notification, except when no response object can be claimed to carry
 * that notification, in which case the error is returned directly.
 */
static int chan_tcs_write(struct mbox_chan *chan, void *data)
{
	struct tcs_mbox_msg *msg = data;
	const struct device *dev = chan->cl->dev;
	int ret = -EINVAL;

	if (!msg) {
		dev_err(dev, "Payload error.\n");
		goto tx_fail;
	}

	if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
		dev_err(dev, "Payload error.\n");
		goto tx_fail;
	}

	if (msg->invalidate || msg->is_control) {
		dev_err(dev, "Incorrect API.\n");
		goto tx_fail;
	}

	if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
			msg->state != RPMH_AWAKE_STATE) {
		dev_err(dev, "Incorrect API.\n");
		goto tx_fail;
	}

	/* Read requests should always be single */
	if (msg->is_read && msg->num_payload > 1) {
		dev_err(dev, "Incorrect read request.\n");
		goto tx_fail;
	}

	/* Post the message to the TCS and trigger */
	ret = tcs_mbox_write(chan, msg, true);

tx_fail:
	if (ret) {
		struct tcs_drv *drv = container_of(chan->mbox,
					struct tcs_drv, mbox);
		struct tcs_response *resp = get_response_from_pool(drv);

		if (IS_ERR(resp))
			return ret;

		resp->chan = chan;
		resp->msg = msg;
		resp->err = ret;

		dev_err(dev, "Error sending RPMH message %d\n", ret);
		send_tcs_response(resp);
	}

	return 0;
}

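/* Drop all staged commands in TCS 'm' by clearing their enable bits */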
static void __tcs_buffer_invalidate(void __iomem *base, int m)
{
	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
}

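/*
 * Invalidate every WAKE and SLEEP TCS, waiting for each one to go idle
 * first; reached via the controller-data path with msg->invalidate set.
 */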
static int tcs_mbox_invalidate(struct mbox_chan *chan)
{
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs;
	int m, i;
	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
	int type = 0;

	do {
		tcs = get_tcs_of_type(drv, inv_types[type]);
		if (IS_ERR(tcs))
			return PTR_ERR(tcs);

		spin_lock(&tcs->tcs_lock);
		for (i = 0; i < tcs->num_tcs; i++) {
			m = i + tcs->tcs_offset;
			spin_lock(&tcs->tcs_m_lock[i]);
			while (!tcs_is_free(drv->reg_base, m))
				cpu_relax();
			__tcs_buffer_invalidate(drv->reg_base, m);
			spin_unlock(&tcs->tcs_m_lock[i]);
		}
		/* Mark the TCS as free */
		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
		spin_unlock(&tcs->tcs_lock);
	} while (++type < ARRAY_SIZE(inv_types));

	return 0;
}

static void __tcs_write_hidden(void __iomem *base, int d,
				struct tcs_mbox_msg *msg)
{
	int i;
	void __iomem *addr;
	const u32 offset = TCS_HIDDEN_CMD0_DRV_DATA - TCS_HIDDEN_CMD0_DRV_ADDR;

	addr = base + TCS_HIDDEN_CMD0_DRV_ADDR;
	for (i = 0; i < msg->num_payload; i++) {
		/* Only data is write capable */
		writel_relaxed(cpu_to_le32(msg->payload[i].data),
						addr + offset);
		trace_rpmh_control_msg(addr + offset, msg->payload[i].data);
		addr += TCS_HIDDEN_CMD_SHIFT;
	}
}

static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
{
	const struct device *dev = chan->cl->dev;
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs;

	tcs = get_tcs_of_type(drv, CONTROL_TCS);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	if (msg->num_payload != tcs->ncpt) {
		dev_err(dev, "Request must fit the control TCS size.\n");
		return -EINVAL;
	}

	spin_lock(&tcs->tcs_lock);
	__tcs_write_hidden(tcs->drv->base, drv->drv_id, msg);
	spin_unlock(&tcs->tcs_lock);

	return 0;
}

/**
 * chan_tcs_ctrl_write: Write message to the controller, no ACK sent.
 *
 * @chan: the MBOX channel
 * @data: the tcs_mbox_msg*
 */
static int chan_tcs_ctrl_write(struct mbox_chan *chan, void *data)
{
	struct tcs_mbox_msg *msg = data;
	const struct device *dev = chan->cl->dev;
	int ret = -EINVAL;

	if (!msg) {
		dev_err(dev, "Payload error.\n");
		goto tx_done;
	}

	if (msg->num_payload > MAX_RPMH_PAYLOAD) {
		dev_err(dev, "Payload error.\n");
		goto tx_done;
	}

	/* Invalidate sleep/wake TCS */
	if (msg->invalidate) {
		ret = tcs_mbox_invalidate(chan);
		goto tx_done;
	}

	/* Control slots are unique. They carry specific data. */
	if (msg->is_control) {
		ret = tcs_control_write(chan, msg);
		goto tx_done;
	}

	if (msg->is_complete) {
		dev_err(dev, "Incorrect ctrl request.\n");
		goto tx_done;
	}

	/* Post the message to the TCS without trigger */
	ret = tcs_mbox_write(chan, msg, false);

tx_done:
	return ret;
}

static int chan_init(struct mbox_chan *chan)
{
	return 0;
}

static void chan_shutdown(struct mbox_chan *chan)
{ }

static const struct mbox_chan_ops mbox_ops = {
	.send_data = chan_tcs_write,
	.send_controller_data = chan_tcs_ctrl_write,
	.startup = chan_init,
	.shutdown = chan_shutdown,
};

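/*
 * A typical client flow (a sketch, not part of this driver): bind a
 * mbox_client and call mbox_request_channel(), then fill a tcs_mbox_msg
 * (state, payload[], num_payload, is_complete) and post it with
 * mbox_send_message(). Since txdone_irq is set, completion is reported
 * through the client's tx_done callback once tcs_irq_handler has run.
 */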
static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
				const struct of_phandle_args *sp)
{
	struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
	struct mbox_chan *chan;

	if (drv->num_assigned >= mbox->num_chans) {
		pr_err("TCS-Mbox out of channel memory\n");
		return ERR_PTR(-ENOMEM);
	}

	chan = &mbox->chans[drv->num_assigned++];

	return chan;
}

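/*
 * Probe: read the DRV configuration registers, carve the physical TCSes
 * into typed groups described in DT, count prospective mailbox clients,
 * and register the controller.
 */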
static int tcs_drv_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *np;
	struct tcs_drv *drv;
	struct mbox_chan *chans;
	struct tcs_mbox *tcs;
	struct of_phandle_args p;
	int irq;
	u32 val[8] = { 0 };
	int num_chans = 0;
	int st = 0;
	int i, j, ret, nelem;
	u32 config, max_tcs, ncpt;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	of_property_read_u32(dn, "qcom,drv-id", &drv->drv_id);

	/* of_iomap() returns NULL on failure, not an ERR_PTR */
	drv->base = of_iomap(dn, 0);
	if (!drv->base)
		return -ENOMEM;

	drv->reg_base = of_iomap(dn, 1);
	if (!drv->reg_base)
		return -ENOMEM;

	config = read_drv_config(drv->base);
	max_tcs = config & (DRV_NUM_TCS_MASK <<
				(DRV_NUM_TCS_SHIFT * drv->drv_id));
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->drv_id);
	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	nelem = of_property_count_elems_of_size(dn, "qcom,tcs-config",
						sizeof(u32));
	if (nelem <= 0 || (nelem % 2) || (nelem > 2 * TCS_TYPE_NR))
		return -EINVAL;

	ret = of_property_read_u32_array(dn, "qcom,tcs-config", val, nelem);
	if (ret)
		return ret;

	for (i = 0; i < (nelem / 2); i++) {
		tcs = &drv->tcs[i];
		tcs->drv = drv;
		tcs->type = val[2 * i];
		tcs->num_tcs = val[2 * i + 1];
		tcs->ncpt = (tcs->type == CONTROL_TCS) ? TCS_HIDDEN_MAX_SLOTS
							: ncpt;
		spin_lock_init(&tcs->tcs_lock);

		if (tcs->num_tcs <= 0 || tcs->type == CONTROL_TCS)
			continue;

		if (tcs->num_tcs > MAX_TCS_PER_TYPE)
			return -EINVAL;

		if (st > max_tcs)
			return -EINVAL;

		tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->tcs_offset = st;
		st += tcs->num_tcs;

		tcs->cmd_addr = devm_kzalloc(&pdev->dev, sizeof(u32) *
					tcs->num_tcs * tcs->ncpt, GFP_KERNEL);
		if (!tcs->cmd_addr)
			return -ENOMEM;

		for (j = 0; j < tcs->num_tcs; j++)
			spin_lock_init(&tcs->tcs_m_lock[j]);
	}

	/* Allocate only that many channels specified in DT for our MBOX */
	for_each_node_with_property(np, "mboxes") {
		if (!of_device_is_available(np))
			continue;
		i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
		for (j = 0; j < i; j++) {
			ret = of_parse_phandle_with_args(np, "mboxes",
							"#mbox-cells", j, &p);
			/* Only count clients that reference this controller */
			if (!ret && p.np == pdev->dev.of_node) {
				num_chans++;
				break;
			}
		}
	}

	if (!num_chans) {
		pr_err("%s: No clients for controller (%s)\n", __func__,
							dn->full_name);
		return -ENODEV;
	}

	chans = devm_kzalloc(&pdev->dev, num_chans * sizeof(*chans),
				GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	for (i = 0; i < num_chans; i++) {
		chans[i].mbox = &drv->mbox;
		chans[i].txdone_method = TXDONE_BY_IRQ;
	}

	drv->mbox.dev = &pdev->dev;
	drv->mbox.ops = &mbox_ops;
	drv->mbox.chans = chans;
	drv->mbox.num_chans = num_chans;
	drv->mbox.txdone_irq = true;
	drv->mbox.of_xlate = of_tcs_mbox_xlate;
	drv->mbox.is_idle = tcs_drv_is_idle;
	drv->num_tcs = st;
	drv->pdev = pdev;

	ret = tcs_response_pool_init(drv);
	if (ret)
		return ret;

	irq = of_irq_get(dn, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
			tcs_irq_handler,
			IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			"tcs_irq", drv);
	if (ret)
		return ret;

	/* Enable interrupts for AMC TCS */
	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0,
					drv->tcs[ACTIVE_TCS].tcs_mask);

	ret = mbox_controller_register(&drv->mbox);
	if (ret)
		return ret;

	pr_debug("Mailbox controller (%s, drv=%d) registered\n",
					dn->full_name, drv->drv_id);

	return 0;
}

static const struct of_device_id tcs_drv_match[] = {
	{ .compatible = "qcom,tcs-drv", },
	{ }
};

static struct platform_driver tcs_mbox_driver = {
	.probe = tcs_drv_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = tcs_drv_match,
	},
};

static int __init tcs_mbox_driver_init(void)
{
	return platform_driver_register(&tcs_mbox_driver);
}
arch_initcall(tcs_mbox_driver_init);