/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h> /* For dev_err */
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <asm-generic/io.h>

#include <soc/qcom/tcs.h>

#include <dt-bindings/soc/qcom,tcs-mbox.h>

#include "mailbox.h"

#define CREATE_TRACE_POINTS
#include <trace/events/rpmh.h>

#define MAX_CMDS_PER_TCS		16
#define MAX_TCS_PER_TYPE		3
#define MAX_TCS_SLOTS			(MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)

#define TCS_DRV_TCS_OFFSET		672
#define TCS_DRV_CMD_OFFSET		20

/* DRV Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Register offsets */
#define TCS_DRV_IRQ_ENABLE		0x00
#define TCS_DRV_IRQ_STATUS		0x04
#define TCS_DRV_IRQ_CLEAR		0x08
#define TCS_DRV_CMD_WAIT_FOR_CMPL	0x10
#define TCS_DRV_CONTROL			0x14
#define TCS_DRV_STATUS			0x18
#define TCS_DRV_CMD_ENABLE		0x1C
#define TCS_DRV_CMD_MSGID		0x30
#define TCS_DRV_CMD_ADDR		0x34
#define TCS_DRV_CMD_DATA		0x38
#define TCS_DRV_CMD_STATUS		0x3C
#define TCS_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

/* Control/Hidden TCS */
#define TCS_HIDDEN_MAX_SLOTS		3
#define TCS_HIDDEN_CMD0_DRV_DATA	0x38
#define TCS_HIDDEN_CMD_SHIFT		0x08

#define TCS_TYPE_NR			4
#define TCS_MBOX_TOUT_MS		2000
#define MAX_POOL_SIZE			(MAX_TCS_PER_TYPE * TCS_TYPE_NR)
#define TCS_M_INIT			0xFFFF

struct tcs_drv;

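/* Per-request state, handed out from a fixed pool while the request is in flight */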
struct tcs_response {
	struct tcs_drv *drv;
	struct mbox_chan *chan;
	struct tcs_mbox_msg *msg;
	u32 m; /* m-th TCS */
	struct tasklet_struct tasklet;
	struct delayed_work dwork;
	int err;
	int idx;
	bool in_use;
};

struct tcs_response_pool {
	struct tcs_response resp[MAX_POOL_SIZE];
	spinlock_t lock;
	DECLARE_BITMAP(avail, MAX_POOL_SIZE);
};

/* One per TCS type of a controller */
struct tcs_mbox {
	struct tcs_drv *drv;
	u32 *cmd_addr;
	int type;
	u32 tcs_mask;
	u32 tcs_offset;
	int num_tcs;
	int ncpt; /* num cmds per tcs */
	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
	spinlock_t tcs_lock; /* TCS type lock */
};

/* One per MBOX controller */
struct tcs_drv {
	const char *name;
	void *base; /* start address of the RSC's registers */
	void *reg_base; /* start address for DRV specific register */
	int drv_id;
	struct platform_device *pdev;
	struct mbox_controller mbox;
	struct tcs_mbox tcs[TCS_TYPE_NR];
	int num_assigned;
	int num_tcs;
	struct workqueue_struct *wq;
	struct tcs_response_pool *resp_pool;
	atomic_t tcs_in_use[TCS_TYPE_NR * MAX_TCS_PER_TYPE];
};

static void tcs_notify_tx_done(unsigned long data);
static void tcs_notify_timeout(struct work_struct *work);

static int tcs_response_pool_init(struct tcs_drv *drv)
{
	struct tcs_response_pool *pool;
	int i;

	pool = devm_kzalloc(&drv->pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	for (i = 0; i < MAX_POOL_SIZE; i++) {
		tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
				(unsigned long) &pool->resp[i]);
		INIT_DELAYED_WORK(&pool->resp[i].dwork, tcs_notify_timeout);
		pool->resp[i].drv = drv;
		pool->resp[i].idx = i;
		pool->resp[i].m = TCS_M_INIT;
	}

	spin_lock_init(&pool->lock);
	drv->resp_pool = pool;

	return 0;
}

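/**
 * setup_response: Allocate a response object from the pool and initialize
 * it for the request being sent on TCS 'm'.
 */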
static struct tcs_response *setup_response(struct tcs_drv *drv,
		struct tcs_mbox_msg *msg, struct mbox_chan *chan,
		u32 m, int err)
{
	struct tcs_response_pool *pool = drv->resp_pool;
	struct tcs_response *resp = ERR_PTR(-ENOMEM);
	int pos;

	spin_lock(&pool->lock);
	pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
	if (pos != MAX_POOL_SIZE) {
		bitmap_set(pool->avail, pos, 1);
		resp = &pool->resp[pos];
		resp->chan = chan;
		resp->msg = msg;
		resp->m = m;
		resp->err = err;
		resp->in_use = false;
	}
	spin_unlock(&pool->lock);

	return resp;
}

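/**
 * free_response: Return a response object to the pool.
 */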
static void free_response(struct tcs_response *resp)
{
	struct tcs_response_pool *pool = resp->drv->resp_pool;

	spin_lock(&pool->lock);
	resp->err = -EINVAL;
	bitmap_clear(pool->avail, resp->idx, 1);
	spin_unlock(&pool->lock);
}

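/**
 * get_response: Find the in-flight response object for TCS 'm' and mark
 * it as in use.
 */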
static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m)
{
	struct tcs_response_pool *pool = drv->resp_pool;
	struct tcs_response *resp = NULL;
	int pos = 0;

	do {
		pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
		if (pos == MAX_POOL_SIZE)
			break;
		resp = &pool->resp[pos];
		if (resp->m == m && !resp->in_use) {
			resp->in_use = true;
			break;
		}
		pos++;
	} while (1);

	return resp;
}

static inline u32 read_drv_config(void __iomem *base)
{
	return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
}

static inline u32 read_tcs_reg(void __iomem *base, int reg, int m, int n)
{
	return le32_to_cpu(readl_relaxed(base + reg +
			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n));
}

static inline void write_tcs_reg(void __iomem *base, int reg, int m, int n,
				u32 data)
{
	writel_relaxed(cpu_to_le32(data), base + reg +
			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n);
}

static inline void write_tcs_reg_sync(void __iomem *base, int reg, int m, int n,
				u32 data)
{
	do {
		write_tcs_reg(base, reg, m, n, data);
		if (data == read_tcs_reg(base, reg, m, n))
			break;
		udelay(1);
	} while (1);
}

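/**
 * tcs_is_free: A TCS is considered free when its status register reads
 * non-zero and no request in this driver has claimed it.
 */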
static inline bool tcs_is_free(struct tcs_drv *drv, int m)
{
	void __iomem *base = drv->reg_base;

	return read_tcs_reg(base, TCS_DRV_STATUS, m, 0) &&
			!atomic_read(&drv->tcs_in_use[m]);
}

static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
{
	struct tcs_mbox *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->tcs_mask & BIT(m))
			break;
	}

	if (i == TCS_TYPE_NR)
		tcs = NULL;

	return tcs;
}

static inline struct tcs_mbox *get_tcs_of_type(struct tcs_drv *drv, int type)
{
	int i;
	struct tcs_mbox *tcs;

	for (i = 0; i < TCS_TYPE_NR; i++)
		if (type == drv->tcs[i].type)
			break;

	if (i == TCS_TYPE_NR)
		return ERR_PTR(-EINVAL);

	tcs = &drv->tcs[i];
	if (!tcs->num_tcs)
		return ERR_PTR(-EINVAL);

	return tcs;
}

static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
						struct tcs_mbox_msg *msg)
{
	int type = -1;

	/* Which box are we dropping this in and do we trigger the TCS */
	switch (msg->state) {
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_AWAKE_STATE:
		/*
		 * Awake state is only used when the DRV has no separate
		 * TCS for ACTIVE requests. Switch to WAKE TCS to send
		 * active votes. Otherwise, the caller should be explicit
		 * about the state.
		 */
		if (IS_ERR(get_tcs_of_type(drv, ACTIVE_TCS)))
			type = WAKE_TCS;
		break;
	}

	if (msg->is_read)
		type = ACTIVE_TCS;

	if (type < 0)
		return ERR_PTR(-EINVAL);

	return get_tcs_of_type(drv, type);
}

static inline void send_tcs_response(struct tcs_response *resp)
{
	tasklet_schedule(&resp->tasklet);
}

static inline void schedule_tcs_err_response(struct tcs_response *resp)
{
	schedule_delayed_work(&resp->dwork, msecs_to_jiffies(TCS_MBOX_TOUT_MS));
}

/**
 * tcs_irq_handler: TX Done / Recv data handler
 */
static irqreturn_t tcs_irq_handler(int irq, void *p)
{
	struct tcs_drv *drv = p;
	void __iomem *base = drv->reg_base;
	int m, i;
	u32 irq_status, sts;
	struct tcs_mbox *tcs;
	struct tcs_response *resp;
	struct tcs_cmd *cmd;
	u32 data;

	/* Know which TCSes were triggered */
	irq_status = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);

	for (m = 0; irq_status >= BIT(m); m++) {
		if (!(irq_status & BIT(m)))
			continue;

		resp = get_response(drv, m);
		if (!resp) {
			pr_err("No resp request for TCS-%d\n", m);
			continue;
		}

		cancel_delayed_work(&resp->dwork);

		tcs = get_tcs_from_index(drv, m);
		if (!tcs) {
			pr_err("TCS-%d doesn't exist in DRV\n", m);
			continue;
		}

		/* Check if all commands were completed */
		resp->err = 0;
		for (i = 0; i < resp->msg->num_payload; i++) {
			cmd = &resp->msg->payload[i];
			sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
			if ((!(sts & CMD_STATUS_ISSUED)) ||
				((resp->msg->is_complete || cmd->complete) &&
				(!(sts & CMD_STATUS_COMPL))))
				resp->err = -EIO;
		}

		/* Check for response if this was a read request */
		if (resp->msg->is_read) {
			/* Return the data in the same request payload */
			data = read_tcs_reg(base, TCS_DRV_CMD_RESP_DATA, m, 0);
			resp->msg->payload[0].data = data;
			mbox_chan_received_data(resp->chan, resp->msg);
		}

		trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
						resp->err);

		/* Clear the AMC mode for non-ACTIVE TCSes */
		if (tcs->type != ACTIVE_TCS) {
			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
			data &= ~TCS_AMC_MODE_ENABLE;
			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
		} else {
			/* Clear the enable bit for the commands */
			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
		}

		/* Clear the TCS IRQ status */
		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));

		/* Clean up response object and notify mbox in tasklet */
		send_tcs_response(resp);

		/* Mark the TCS as free; the client is notified in the tasklet */
		atomic_set(&drv->tcs_in_use[m], 0);
	}

	return IRQ_HANDLED;
}

static inline void mbox_notify_tx_done(struct mbox_chan *chan,
				struct tcs_mbox_msg *msg, int m, int err)
{
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);

	trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
	mbox_chan_txdone(chan, err);
}

/**
 * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
 */
static void tcs_notify_tx_done(unsigned long data)
{
	struct tcs_response *resp = (struct tcs_response *) data;
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	int err = resp->err;
	int m = resp->m;

	mbox_notify_tx_done(chan, msg, m, err);
	free_response(resp);
}

/**
 * tcs_notify_timeout: TX Done for requests that do trigger TCS, but
 * we do not get a response IRQ back.
 */
static void tcs_notify_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tcs_response *resp = container_of(dwork,
					struct tcs_response, dwork);
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	struct tcs_drv *drv = resp->drv;
	int m = resp->m;

	/*
	 * In case the RPMH resource fails to respond to the completion
	 * request, the TCS would be blocked forever waiting on the response.
	 * There is no way to recover from this case.
	 */
	if (!tcs_is_free(drv, m)) {
		bool pending = false;
		struct tcs_cmd *cmd;
		int i;
		u32 addr;

		for (i = 0; i < msg->num_payload; i++) {
			cmd = &msg->payload[i];
			addr = read_tcs_reg(drv->reg_base, TCS_DRV_CMD_ADDR,
						m, i);
			pending |= (cmd->addr == addr);
		}
		if (pending) {
			pr_err("TCS-%d blocked waiting for RPMH to respond.\n",
				m);
			for (i = 0; i < msg->num_payload; i++)
				pr_err("Addr: 0x%x Data: 0x%x\n",
						msg->payload[i].addr,
						msg->payload[i].data);
			BUG();
		}
	}

	mbox_notify_tx_done(chan, msg, -1, -ETIMEDOUT);
	free_response(resp);
}

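/**
 * __tcs_buffer_write: Write the command payload into TCS 'm' starting at
 * command slot 'n' and, if requested, trigger the TCS in AMC mode.
 */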
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
			struct tcs_mbox_msg *msg, bool trigger)
{
	u32 msgid, cmd_msgid = 0;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	u32 enable = TCS_AMC_MODE_ENABLE;
	struct tcs_cmd *cmd;
	int i;
	void __iomem *base = drv->reg_base;

	/* We have a homologous command set, i.e. pure read or write, not a mix */
	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= (msg->is_complete) ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= (!msg->is_read) ? CMD_MSGID_WRITE : 0;

	/* Read the send-after-prev complete flag for those already in TCS */
	cmd_complete = read_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0);

	for (i = 0; i < msg->num_payload; i++) {
		cmd = &msg->payload[i];
		cmd_enable |= BIT(n + i);
		cmd_complete |= cmd->complete << (n + i);
		msgid = cmd_msgid;
		msgid |= (cmd->complete) ? CMD_MSGID_RESP_REQ : 0;
		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
		trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
					cmd->data, cmd->complete, trigger);
	}

	/* Write the send-after-prev completion bits for the batch */
	write_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0, cmd_complete);

	/* Enable the new commands in TCS */
	cmd_enable |= read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);

	if (trigger) {
		/* HW req: Clear the DRV_CONTROL and enable TCS again */
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
		/* Enable the AMC mode on the TCS */
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
	}
}

/**
 * tcs_drv_is_idle: Check if any of the AMCs are busy.
 *
 * @mbox: The mailbox controller.
 *
 * Returns true if the AMCs are not engaged or absent.
 */
static bool tcs_drv_is_idle(struct mbox_controller *mbox)
{
	int m;
	struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs = get_tcs_of_type(drv, ACTIVE_TCS);

	/* Check for WAKE TCS if there are no ACTIVE TCS */
	if (IS_ERR(tcs))
		tcs = get_tcs_of_type(drv, WAKE_TCS);

	for (m = tcs->tcs_offset; m < tcs->tcs_offset + tcs->num_tcs; m++)
		if (!tcs_is_free(drv, m))
			return false;

	return true;
}

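/**
 * wait_for_req_inflight: Spin until no in-flight TCS of this type has a
 * command for any of the addresses in this message.
 */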
static void wait_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
						struct tcs_mbox_msg *msg)
{
	u32 curr_enabled;
	int i, j, k, m;
	bool is_free;

	do {
		is_free = true;
		for (i = 0; i < tcs->num_tcs; i++) {
			m = tcs->tcs_offset + i;
			if (tcs_is_free(drv, m))
				continue;
			curr_enabled = read_tcs_reg(drv->reg_base,
						TCS_DRV_CMD_ENABLE, m, 0);
			for (j = 0; j < msg->num_payload; j++) {
				for (k = 0; k < MAX_CMDS_PER_TCS; k++) {
					if (!(curr_enabled & BIT(k)))
						continue;
					if (tcs->cmd_addr[i * tcs->ncpt + k] ==
						msg->payload[j].addr) {
						is_free = false;
						goto retry;
					}
				}
			}
		}
retry:
		if (!is_free)
			udelay(1);
	} while (!is_free);
}

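/**
 * find_free_tcs: Loop until an AMC of this type is free and return the
 * first command slot of that TCS.
 */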
static int find_free_tcs(struct tcs_mbox *tcs)
{
	int slot, m = 0;
	u32 irq_status;

	/* Loop until we find a free AMC */
	do {
		if (tcs_is_free(tcs->drv, tcs->tcs_offset + m)) {
			slot = m * tcs->ncpt;
			break;
		}
		if (++m >= tcs->num_tcs) {
			m = 0;
			irq_status = read_tcs_reg(tcs->drv->reg_base,
						TCS_DRV_IRQ_STATUS, 0, 0);
			WARN((irq_status & tcs->tcs_mask && in_irq()),
			"TCS busy. Request should not be made from hard IRQ context.");
			udelay(10);
		}
	} while (1);

	return slot;
}

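/**
 * find_match: Find the slot at which this sequence of command addresses
 * was previously written into the TCS, or -1 if it is not cached.
 */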
static int find_match(struct tcs_mbox *tcs, struct tcs_cmd *cmd, int len)
{
	bool found = false;
	int i = 0, j;

	/* Check for already cached commands */
	while ((i = find_next_bit(tcs->slots, MAX_TCS_SLOTS, i)) <
			MAX_TCS_SLOTS) {
		if (tcs->cmd_addr[i] != cmd[0].addr) {
			i++;
			continue;
		}
		/* Sanity check to ensure the sequence is the same */
		for (j = 1; j < len; j++) {
			if (tcs->cmd_addr[i + j] != cmd[j].addr) {
				WARN(1,
				"Message does not match previous sequence.\n");
				return -EINVAL;
			}
		}
		found = true;
		break;
	}

	return found ? i : -1;
}

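/**
 * find_slots: Find a contiguous run of command slots, within a single
 * TCS, that can hold the message payload.
 */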
static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg)
{
	int slot;
	int n = 0;

	/* For active requests find the first free AMC. */
	if (tcs->type == ACTIVE_TCS)
		return find_free_tcs(tcs);

	/* Find if we already have the msg in our TCS */
	slot = find_match(tcs, msg->payload, msg->num_payload);
	if (slot >= 0)
		return slot;

	/* Do over, until we can fit the full payload in a TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
					n, msg->num_payload, 0);
		if (slot == MAX_TCS_SLOTS)
			break;
		n += tcs->ncpt;
	} while (slot + msg->num_payload - 1 >= n);

	return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
}

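/**
 * tcs_mbox_write: Copy the message into a TCS of the matching type and
 * optionally trigger it. Triggered requests get a timeout response
 * scheduled in case the completion IRQ never arrives.
 */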
static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
				bool trigger)
{
	const struct device *dev = chan->cl->dev;
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	int d = drv->drv_id;
	struct tcs_mbox *tcs;
	int i, slot, offset, m, n;
	struct tcs_response *resp = NULL;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	if (trigger)
		resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);

	/* Identify the sequential slots that we can write to */
	spin_lock(&tcs->tcs_lock);
	slot = find_slots(tcs, msg);
	if (slot < 0) {
		dev_err(dev, "No TCS slot found.\n");
		spin_unlock(&tcs->tcs_lock);
		if (resp)
			free_response(resp);
		return slot;
	}

	/* Mark the slots as in-use, before we unlock */
	if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
		bitmap_set(tcs->slots, slot, msg->num_payload);

	/* Copy the addresses of the resources over to the slots */
	for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
		tcs->cmd_addr[slot + i] = msg->payload[i].addr;

	offset = slot / tcs->ncpt;
	m = offset + tcs->tcs_offset;
	n = slot % tcs->ncpt;

	/* Block, if we have an address from the msg in flight */
	if (trigger) {
		resp->m = m;
		/* Mark the TCS as busy */
		atomic_set(&drv->tcs_in_use[m], 1);
		wait_for_req_inflight(drv, tcs, msg);
	}

	/* Write to the TCS or AMC */
	__tcs_buffer_write(drv, d, m, n, msg, trigger);

	/* Schedule a timeout response, in case there is no actual response */
	if (trigger)
		schedule_tcs_err_response(resp);

	spin_unlock(&tcs->tcs_lock);

	return 0;
}

static void __tcs_buffer_invalidate(void __iomem *base, int m)
{
	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
}

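/**
 * tcs_mbox_invalidate: Clear the enabled commands in all wake and sleep
 * TCSes and mark their slots as free.
 */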
static int tcs_mbox_invalidate(struct mbox_chan *chan)
{
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs;
	int m, i;
	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
	int type = 0;

	do {
		tcs = get_tcs_of_type(drv, inv_types[type]);
		if (IS_ERR(tcs))
			return PTR_ERR(tcs);

		spin_lock(&tcs->tcs_lock);
		for (i = 0; i < tcs->num_tcs; i++) {
			m = i + tcs->tcs_offset;
			while (!tcs_is_free(drv, m))
				udelay(1);
			__tcs_buffer_invalidate(drv->reg_base, m);
		}
		/* Mark the TCS slots as free */
		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
		spin_unlock(&tcs->tcs_lock);
	} while (++type < ARRAY_SIZE(inv_types));

	return 0;
}

/**
 * chan_tcs_write: Validate the incoming message and write to the
 * appropriate TCS block.
 *
 * @chan: the MBOX channel
 * @data: the tcs_mbox_msg*
 *
 * Returns a negative error for an invalid message structure or an invalid
 * message combination, -EBUSY if there is another active request for the
 * channel in progress, otherwise bubbles up internal errors.
 */
static int chan_tcs_write(struct mbox_chan *chan, void *data)
{
	struct tcs_mbox_msg *msg = data;
	const struct device *dev = chan->cl->dev;
	int ret = -EINVAL;

	if (!msg) {
		dev_err(dev, "Payload error.\n");
		goto tx_fail;
	}

	if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
		dev_err(dev, "Payload error.\n");
		goto tx_fail;
	}

	if (msg->invalidate || msg->is_control) {
		dev_err(dev, "Incorrect API.\n");
		goto tx_fail;
	}

	if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
			msg->state != RPMH_AWAKE_STATE) {
		dev_err(dev, "Incorrect API.\n");
		goto tx_fail;
	}

	/* Read requests should always be single */
	if (msg->is_read && msg->num_payload > 1) {
		dev_err(dev, "Incorrect read request.\n");
		goto tx_fail;
	}

	/*
	 * Since we are re-purposing the wake TCS, invalidate previous
	 * contents to avoid confusion.
	 */
	if (msg->state == RPMH_AWAKE_STATE)
		tcs_mbox_invalidate(chan);

	/* Post the message to the TCS and trigger */
	ret = tcs_mbox_write(chan, msg, true);

tx_fail:
	if (ret) {
		struct tcs_drv *drv = container_of(chan->mbox,
						struct tcs_drv, mbox);
		struct tcs_response *resp = setup_response(
				drv, msg, chan, TCS_M_INIT, ret);

		dev_err(dev, "Error sending RPMH message %d\n", ret);
		send_tcs_response(resp);
	}

	return 0;
}

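/**
 * __tcs_write_hidden: Write data values into the control (hidden) TCS.
 * Only the data field of each command is writable here.
 */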
static void __tcs_write_hidden(struct tcs_drv *drv, int d,
					struct tcs_mbox_msg *msg)
{
	int i;
	void __iomem *addr = drv->base + TCS_HIDDEN_CMD0_DRV_DATA;

	for (i = 0; i < msg->num_payload; i++) {
		/* Only data is write capable */
		writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
		trace_rpmh_control_msg(drv->name, msg->payload[i].data);
		addr += TCS_HIDDEN_CMD_SHIFT;
	}
}

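/**
 * tcs_control_write: Write a control message into the hidden TCS; the
 * request must exactly fill the control TCS slots.
 */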
static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
{
	const struct device *dev = chan->cl->dev;
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs;

	tcs = get_tcs_of_type(drv, CONTROL_TCS);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	if (msg->num_payload != tcs->ncpt) {
		dev_err(dev, "Request must fit the control TCS size.\n");
		return -EINVAL;
	}

	spin_lock(&tcs->tcs_lock);
	__tcs_write_hidden(tcs->drv, drv->drv_id, msg);
	spin_unlock(&tcs->tcs_lock);

	return 0;
}

/**
 * chan_tcs_ctrl_write: Write message to the controller, no ACK sent.
 *
 * @chan: the MBOX channel
 * @data: the tcs_mbox_msg*
 */
static int chan_tcs_ctrl_write(struct mbox_chan *chan, void *data)
{
	struct tcs_mbox_msg *msg = data;
	const struct device *dev = chan->cl->dev;
	int ret = -EINVAL;

	if (!msg) {
		dev_err(dev, "Payload error.\n");
		goto tx_done;
	}

	if (msg->num_payload > MAX_RPMH_PAYLOAD) {
		dev_err(dev, "Payload error.\n");
		goto tx_done;
	}

	/* Invalidate sleep/wake TCS */
	if (msg->invalidate) {
		ret = tcs_mbox_invalidate(chan);
		goto tx_done;
	}

	/* Control slots are unique. They carry specific data. */
	if (msg->is_control) {
		ret = tcs_control_write(chan, msg);
		goto tx_done;
	}

	/* Post the message to the TCS without trigger */
	ret = tcs_mbox_write(chan, msg, false);

tx_done:
	return ret;
}

static int chan_init(struct mbox_chan *chan)
{
	return 0;
}

static void chan_shutdown(struct mbox_chan *chan)
{ }

static const struct mbox_chan_ops mbox_ops = {
	.send_data = chan_tcs_write,
	.send_controller_data = chan_tcs_ctrl_write,
	.startup = chan_init,
	.shutdown = chan_shutdown,
};

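/**
 * of_tcs_mbox_xlate: Hand out the next unassigned channel to a client
 * referenced in the device tree.
 */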
static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
				const struct of_phandle_args *sp)
{
	struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
	struct mbox_chan *chan;

	if (drv->num_assigned >= mbox->num_chans) {
		pr_err("TCS-Mbox out of channel memory\n");
		return ERR_PTR(-ENOMEM);
	}

	chan = &mbox->chans[drv->num_assigned++];

	return chan;
}

static int tcs_drv_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *np;
	struct tcs_drv *drv;
	struct mbox_chan *chans;
	struct tcs_mbox *tcs;
	struct of_phandle_args p;
	int irq;
	u32 val[8] = { 0 };
	int num_chans = 0;
	int st = 0;
	int i, j, ret, nelem;
	u32 config, max_tcs, ncpt;
	int tcs_type_count[TCS_TYPE_NR] = { 0 };
	struct resource *res;
	u32 irq_mask;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->drv_id);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;
	drv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(drv->base))
		return PTR_ERR(drv->base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -EINVAL;
	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(drv->reg_base))
		return PTR_ERR(drv->reg_base);

	config = read_drv_config(drv->base);
	max_tcs = config & (DRV_NUM_TCS_MASK <<
				(DRV_NUM_TCS_SHIFT * drv->drv_id));
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->drv_id);
	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	nelem = of_property_count_elems_of_size(dn, "qcom,tcs-config",
						sizeof(u32));
	if (!nelem || (nelem % 2) || (nelem > 2 * TCS_TYPE_NR))
		return -EINVAL;

	ret = of_property_read_u32_array(dn, "qcom,tcs-config", val, nelem);
	if (ret)
		return ret;

	/* Ensure we have no more than one of each type in DT */
	for (i = 0; i < (nelem / 2); i++) {
		if (val[2 * i] >= TCS_TYPE_NR)
			return -EINVAL;
		tcs_type_count[val[2 * i]]++;
		if (tcs_type_count[val[2 * i]] > 1)
			return -EINVAL;
	}

	/* Ensure we have each type specified in DT */
	for (i = 0; i < ARRAY_SIZE(tcs_type_count); i++)
		if (!tcs_type_count[i])
			return -EINVAL;

	for (i = 0; i < (nelem / 2); i++) {
		tcs = &drv->tcs[val[2 * i]];
		tcs->drv = drv;
		tcs->type = val[2 * i];
		tcs->num_tcs = val[2 * i + 1];
		tcs->ncpt = (tcs->type == CONTROL_TCS) ? TCS_HIDDEN_MAX_SLOTS
							: ncpt;
		spin_lock_init(&tcs->tcs_lock);

		if (tcs->num_tcs <= 0 || tcs->type == CONTROL_TCS)
			continue;

		if (tcs->num_tcs > MAX_TCS_PER_TYPE)
			return -EINVAL;

		if (st > max_tcs)
			return -EINVAL;

		tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->tcs_offset = st;
		st += tcs->num_tcs;

		tcs->cmd_addr = devm_kzalloc(&pdev->dev, sizeof(u32) *
					tcs->num_tcs * tcs->ncpt, GFP_KERNEL);
		if (!tcs->cmd_addr)
			return -ENOMEM;
	}

	/* Allocate only as many channels as are specified in DT for our MBOX */
	for_each_node_with_property(np, "mboxes") {
		if (!of_device_is_available(np))
			continue;
		i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
		for (j = 0; j < i; j++) {
			ret = of_parse_phandle_with_args(np, "mboxes",
						"#mbox-cells", j, &p);
			if (!ret && p.np == pdev->dev.of_node)
				break;
		}
		num_chans++;
	}

	if (!num_chans) {
		pr_err("%s: No clients for controller (%s)\n", __func__,
			dn->full_name);
		return -ENODEV;
	}

	chans = devm_kzalloc(&pdev->dev, num_chans * sizeof(*chans),
				GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	for (i = 0; i < num_chans; i++) {
		chans[i].mbox = &drv->mbox;
		chans[i].txdone_method = TXDONE_BY_IRQ;
	}

	drv->mbox.dev = &pdev->dev;
	drv->mbox.ops = &mbox_ops;
	drv->mbox.chans = chans;
	drv->mbox.num_chans = num_chans;
	drv->mbox.txdone_irq = true;
	drv->mbox.of_xlate = of_tcs_mbox_xlate;
	drv->mbox.is_idle = tcs_drv_is_idle;
	drv->num_tcs = st;
	drv->pdev = pdev;

	drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	ret = tcs_response_pool_init(drv);
	if (ret)
		return ret;

	irq = of_irq_get(dn, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
			IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			"tcs_irq", drv);
	if (ret)
		return ret;

	/*
	 * Enable interrupts for the AMC TCS;
	 * if there are no AMC TCS, use the wake TCS.
	 */
	irq_mask = (drv->tcs[ACTIVE_TCS].num_tcs) ?
				drv->tcs[ACTIVE_TCS].tcs_mask :
				drv->tcs[WAKE_TCS].tcs_mask;
	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0, irq_mask);

	for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
		atomic_set(&drv->tcs_in_use[i], 0);

	ret = mbox_controller_register(&drv->mbox);
	if (ret)
		return ret;

	pr_debug("Mailbox controller (%s, drv=%d) registered\n",
			dn->full_name, drv->drv_id);

	return 0;
}

static const struct of_device_id tcs_drv_match[] = {
	{ .compatible = "qcom,tcs-drv", },
	{ }
};

static struct platform_driver tcs_mbox_driver = {
	.probe = tcs_drv_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = tcs_drv_match,
	},
};

static int __init tcs_mbox_driver_init(void)
{
	return platform_driver_register(&tcs_mbox_driver);
}
arch_initcall(tcs_mbox_driver_init);