blob: fb97a6c77fb7574877655992838f8a65706061b9 [file] [log] [blame]
Lina Iyer73101422017-02-16 14:09:25 -07001/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
Lina Iyer88a8fda2016-04-01 08:23:31 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
15
Lina Iyerb68814f2017-04-14 12:49:07 -060016#include <linux/atomic.h>
Lina Iyer88a8fda2016-04-01 08:23:31 -060017#include <linux/bitmap.h>
Lina Iyer8bb7d5a2017-04-20 09:50:41 -060018#include <linux/delay.h>
Lina Iyer88a8fda2016-04-01 08:23:31 -060019#include <linux/interrupt.h>
20#include <linux/jiffies.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/mailbox_client.h> /* For dev_err */
24#include <linux/mailbox_controller.h>
25#include <linux/module.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/of_irq.h>
29#include <linux/platform_device.h>
30#include <linux/spinlock.h>
31#include <linux/workqueue.h>
32
33#include <asm-generic/io.h>
34
35#include <soc/qcom/tcs.h>
36
37#include <dt-bindings/soc/qcom,tcs-mbox.h>
38
39#include "mailbox.h"
40
Lina Iyerea921442016-05-26 15:07:48 -060041#define CREATE_TRACE_POINTS
42#include <trace/events/rpmh.h>
43
Lina Iyer88a8fda2016-04-01 08:23:31 -060044#define MAX_CMDS_PER_TCS 16
45#define MAX_TCS_PER_TYPE 3
46#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
47
48#define TCS_DRV_TCS_OFFSET 672
49#define TCS_DRV_CMD_OFFSET 20
50
51/* DRV Configuration Information Register */
52#define DRV_PRNT_CHLD_CONFIG 0x0C
53#define DRV_NUM_TCS_MASK 0x3F
54#define DRV_NUM_TCS_SHIFT 6
55#define DRV_NCPT_MASK 0x1F
56#define DRV_NCPT_SHIFT 27
57
58/* Register offsets */
59#define TCS_DRV_IRQ_ENABLE 0x00
60#define TCS_DRV_IRQ_STATUS 0x04
61#define TCS_DRV_IRQ_CLEAR 0x08
62#define TCS_DRV_CMD_WAIT_FOR_CMPL 0x10
63#define TCS_DRV_CONTROL 0x14
64#define TCS_DRV_STATUS 0x18
65#define TCS_DRV_CMD_ENABLE 0x1C
66#define TCS_DRV_CMD_MSGID 0x30
67#define TCS_DRV_CMD_ADDR 0x34
68#define TCS_DRV_CMD_DATA 0x38
69#define TCS_DRV_CMD_STATUS 0x3C
70#define TCS_DRV_CMD_RESP_DATA 0x40
71
72#define TCS_AMC_MODE_ENABLE BIT(16)
73#define TCS_AMC_MODE_TRIGGER BIT(24)
74
75/* TCS CMD register bit mask */
76#define CMD_MSGID_LEN 8
77#define CMD_MSGID_RESP_REQ BIT(8)
78#define CMD_MSGID_WRITE BIT(16)
79#define CMD_STATUS_ISSUED BIT(8)
80#define CMD_STATUS_COMPL BIT(16)
81
82/* Control/Hidden TCS */
83#define TCS_HIDDEN_MAX_SLOTS 3
Lina Iyer88a8fda2016-04-01 08:23:31 -060084#define TCS_HIDDEN_CMD0_DRV_DATA 0x38
85#define TCS_HIDDEN_CMD_SHIFT 0x08
86
87#define TCS_TYPE_NR 4
88#define TCS_MBOX_TOUT_MS 2000
89#define MAX_POOL_SIZE (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
Lina Iyerc8712ca2017-04-20 00:18:49 -060090#define TCS_M_INIT 0xFFFF
Lina Iyer88a8fda2016-04-01 08:23:31 -060091
struct tcs_drv;

/*
 * tcs_response - tracks one client request from submission until the
 * TX-done notification (IRQ, tasklet or timeout) releases it.
 */
struct tcs_response {
	struct tcs_drv *drv;		/* owning DRV/controller */
	struct mbox_chan *chan;		/* channel the request arrived on */
	struct tcs_mbox_msg *msg;	/* client message being tracked */
	u32 m; /* m-th TCS */		/* TCS_M_INIT until a TCS is picked */
	struct tasklet_struct tasklet;	/* runs tcs_notify_tx_done() */
	struct delayed_work dwork;	/* timeout: tcs_notify_timeout() */
	int err;			/* completion status reported back */
	int idx;			/* this entry's index in the pool */
	bool in_use;			/* claimed by get_response() */
};

/*
 * Fixed-size pool of response trackers, one pool per controller.
 * NOTE: despite the name, a SET bit in 'avail' means the slot is
 * OCCUPIED (see setup_response()/free_response()).
 */
struct tcs_response_pool {
	struct tcs_response resp[MAX_POOL_SIZE];
	spinlock_t lock;		/* protects 'avail' */
	DECLARE_BITMAP(avail, MAX_POOL_SIZE);
};

/* One per TCS type of a controller */
struct tcs_mbox {
	struct tcs_drv *drv;		/* back-pointer to the controller */
	u32 *cmd_addr;			/* cached resource addr per cmd slot */
	int type;			/* SLEEP/WAKE/ACTIVE/CONTROL_TCS */
	u32 tcs_mask;			/* bitmask of TCS indices of this type */
	u32 tcs_offset;			/* first TCS index of this type */
	int num_tcs;			/* TCSes of this type on this DRV */
	int ncpt; /* num cmds per tcs */
	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);	/* occupied command slots */
	spinlock_t tcs_lock; /* TCS type lock */
};

/* One per MBOX controller */
struct tcs_drv {
	const char *name;		/* for trace/log identification */
	void *base; /* start address of the RSC's registers */
	void *reg_base; /* start address for DRV specific register */
	int drv_id;
	struct platform_device *pdev;
	struct mbox_controller mbox;
	struct tcs_mbox tcs[TCS_TYPE_NR];
	int num_assigned;		/* channels handed out via xlate */
	int num_tcs;			/* total TCSes on this DRV */
	struct workqueue_struct *wq;
	struct tcs_response_pool *resp_pool;
	atomic_t tcs_in_use[MAX_POOL_SIZE];	/* SW busy flag per TCS */
	atomic_t tcs_send_count[MAX_POOL_SIZE];	/* stat: triggers per TCS */
	atomic_t tcs_irq_count[MAX_POOL_SIZE];	/* stat: completions per TCS */
};
142
143static void tcs_notify_tx_done(unsigned long data);
144static void tcs_notify_timeout(struct work_struct *work);
145
/*
 * tcs_response_pool_init: allocate and initialize the per-controller
 * response pool. Each entry gets its completion tasklet, timeout work
 * and a fixed pool index; 'm' starts at TCS_M_INIT (no TCS assigned).
 *
 * Returns 0 on success, -ENOMEM if the devm allocation fails.
 */
static int tcs_response_pool_init(struct tcs_drv *drv)
{
	struct tcs_response_pool *pool;
	int i;

	/* devm: freed automatically when the platform device goes away */
	pool = devm_kzalloc(&drv->pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	for (i = 0; i < MAX_POOL_SIZE; i++) {
		tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
						(unsigned long) &pool->resp[i]);
		INIT_DELAYED_WORK(&pool->resp[i].dwork, tcs_notify_timeout);
		pool->resp[i].drv = drv;
		pool->resp[i].idx = i;
		pool->resp[i].m = TCS_M_INIT;
	}

	spin_lock_init(&pool->lock);
	drv->resp_pool = pool;

	return 0;
}
169
/*
 * setup_response: claim a free entry from the response pool and fill it
 * with the request's bookkeeping data.
 *
 * @m is the TCS index the request will use, or TCS_M_INIT when not yet
 * known (it is patched in later by tcs_mbox_write()). @err pre-seeds
 * the status reported at TX-done (used for the validation-error path).
 *
 * Note: a SET bit in pool->avail marks the slot as occupied.
 *
 * Returns the claimed entry, or ERR_PTR(-ENOMEM) if the pool is full.
 */
static struct tcs_response *setup_response(struct tcs_drv *drv,
		struct tcs_mbox_msg *msg, struct mbox_chan *chan,
		u32 m, int err)
{
	struct tcs_response_pool *pool = drv->resp_pool;
	struct tcs_response *resp = ERR_PTR(-ENOMEM);
	int pos;

	spin_lock(&pool->lock);
	pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
	if (pos != MAX_POOL_SIZE) {
		bitmap_set(pool->avail, pos, 1);
		resp = &pool->resp[pos];
		resp->chan = chan;
		resp->msg = msg;
		resp->m = m;
		resp->err = err;
		resp->in_use = false;
	}
	spin_unlock(&pool->lock);

	return resp;
}
193
/*
 * free_response: return an entry to the pool. The error field is
 * poisoned with -EINVAL so stale reuse is detectable; clearing the
 * 'avail' bit makes the slot claimable by setup_response() again.
 */
static void free_response(struct tcs_response *resp)
{
	struct tcs_response_pool *pool = resp->drv->resp_pool;

	spin_lock(&pool->lock);
	resp->err = -EINVAL;
	bitmap_clear(pool->avail, resp->idx, 1);
	spin_unlock(&pool->lock);
}
203
204static inline struct tcs_response *get_response(struct tcs_drv *drv, u32 m)
205{
206 struct tcs_response_pool *pool = drv->resp_pool;
207 struct tcs_response *resp = NULL;
208 int pos = 0;
209
210 do {
211 pos = find_next_bit(pool->avail, MAX_POOL_SIZE, pos);
212 if (pos == MAX_POOL_SIZE)
213 break;
214 resp = &pool->resp[pos];
215 if (resp->m == m && !resp->in_use) {
216 resp->in_use = true;
217 break;
218 }
219 pos++;
220 } while (1);
221
222 return resp;
Lina Iyer88a8fda2016-04-01 08:23:31 -0600223}
224
/* Read the DRV configuration word (encodes TCS count and cmds-per-TCS). */
static inline u32 read_drv_config(void __iomem *base)
{
	/*
	 * NOTE(review): readl_relaxed() already returns CPU-endian data;
	 * the extra le32_to_cpu() is a no-op on little-endian ARM but
	 * looks like a double conversion — confirm on big-endian.
	 */
	return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
}

/* Read register @reg of command slot @n in TCS @m. */
static inline u32 read_tcs_reg(void __iomem *base, int reg, int m, int n)
{
	return le32_to_cpu(readl_relaxed(base + reg +
			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n));
}

/* Write @data to register @reg of command slot @n in TCS @m (posted). */
static inline void write_tcs_reg(void __iomem *base, int reg, int m, int n,
				u32 data)
{
	writel_relaxed(cpu_to_le32(data), base + reg +
			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n);
}

/*
 * Write and spin until the value reads back, for registers where the
 * HW must have absorbed the write before the next step (see the
 * trigger sequence in __tcs_buffer_write()). Spins forever if the HW
 * never latches the value — intentional: there is no recovery path.
 */
static inline void write_tcs_reg_sync(void __iomem *base, int reg, int m, int n,
				u32 data)
{
	do {
		write_tcs_reg(base, reg, m, n, data);
		if (data == read_tcs_reg(base, reg, m, n))
			break;
		udelay(1);
	} while (1);
}

/*
 * A TCS is free when the HW status register reports it idle (non-zero)
 * AND software has not flagged it busy via tcs_in_use (set between
 * trigger and TX-done).
 */
static inline bool tcs_is_free(struct tcs_drv *drv, int m)
{
	void __iomem *base = drv->reg_base;

	return read_tcs_reg(base, TCS_DRV_STATUS, m, 0) &&
			!atomic_read(&drv->tcs_in_use[m]);
}
261
262static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
263{
264 struct tcs_mbox *tcs;
265 int i;
266
267 for (i = 0; i < TCS_TYPE_NR; i++) {
268 tcs = &drv->tcs[i];
269 if (tcs->tcs_mask & BIT(m))
270 break;
271 }
272
273 if (i == TCS_TYPE_NR)
274 tcs = NULL;
275
276 return tcs;
277}
278
279static inline struct tcs_mbox *get_tcs_of_type(struct tcs_drv *drv, int type)
280{
281 int i;
282 struct tcs_mbox *tcs;
283
284 for (i = 0; i < TCS_TYPE_NR; i++)
285 if (type == drv->tcs[i].type)
286 break;
287
288 if (i == TCS_TYPE_NR)
289 return ERR_PTR(-EINVAL);
290
291 tcs = &drv->tcs[i];
292 if (!tcs->num_tcs)
293 return ERR_PTR(-EINVAL);
294
295 return tcs;
296}
297
298static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
299 struct tcs_mbox_msg *msg)
300{
301 int type = -1;
302
303 /* Which box are we dropping this in and do we trigger the TCS */
304 switch (msg->state) {
305 case RPMH_SLEEP_STATE:
306 type = SLEEP_TCS;
307 break;
308 case RPMH_WAKE_ONLY_STATE:
309 type = WAKE_TCS;
310 break;
311 case RPMH_ACTIVE_ONLY_STATE:
312 type = ACTIVE_TCS;
313 break;
Lina Iyer21c17882016-09-22 11:05:51 -0600314 case RPMH_AWAKE_STATE:
315 /*
316 * Awake state is only used when the DRV has no separate
317 * TCS for ACTIVE requests. Switch to WAKE TCS to send
318 * active votes. Otherwise, the caller should be explicit
319 * about the state.
320 */
321 if (IS_ERR(get_tcs_of_type(drv, ACTIVE_TCS)))
322 type = WAKE_TCS;
323 break;
Lina Iyer88a8fda2016-04-01 08:23:31 -0600324 }
325
326 if (msg->is_read)
327 type = ACTIVE_TCS;
328
329 if (type < 0)
330 return ERR_PTR(-EINVAL);
331
332 return get_tcs_of_type(drv, type);
333}
334
/* Defer the TX-done notification to tasklet context. */
static inline void send_tcs_response(struct tcs_response *resp)
{
	tasklet_schedule(&resp->tasklet);
}

/* Arm the no-IRQ watchdog for a triggered request (fires after 2s). */
static inline void schedule_tcs_err_response(struct tcs_response *resp)
{
	schedule_delayed_work(&resp->dwork, msecs_to_jiffies(TCS_MBOX_TOUT_MS));
}
344
/**
 * tcs_irq_handler: TX Done / Recv data handler
 *
 * Fires when one or more triggered TCSes complete. For every set bit in
 * the IRQ status register: claim the matching response, cancel its
 * timeout, collect per-command status, return read data (if any), clear
 * the TCS/IRQ state, and hand final notification to the tasklet.
 */
static irqreturn_t tcs_irq_handler(int irq, void *p)
{
	struct tcs_drv *drv = p;
	void __iomem *base = drv->reg_base;
	int m, i;
	u32 irq_status, sts;
	struct tcs_mbox *tcs;
	struct tcs_response *resp;
	struct tcs_cmd *cmd;
	u32 data;

	/* Know which TCSes were triggered */
	irq_status = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);

	for (m = 0; irq_status >= BIT(m); m++) {
		if (!(irq_status & BIT(m)))
			continue;

		atomic_inc(&drv->tcs_irq_count[m]);

		resp = get_response(drv, m);
		if (!resp) {
			/*
			 * NOTE(review): this path (and the !tcs one below)
			 * leaves the IRQ status bit uncleared and
			 * tcs_in_use set — verify this cannot wedge the
			 * TCS or re-fire the interrupt.
			 */
			pr_err("No resp request for TCS-%d\n", m);
			continue;
		}

		/* Stop the timeout watchdog; we got the real completion. */
		cancel_delayed_work(&resp->dwork);

		tcs = get_tcs_from_index(drv, m);
		if (!tcs) {
			pr_err("TCS-%d doesn't exist in DRV\n", m);
			continue;
		}

		/* Check if all commands were completed */
		resp->err = 0;
		for (i = 0; i < resp->msg->num_payload; i++) {
			cmd = &resp->msg->payload[i];
			sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
			/*
			 * Error if the command was never issued, or if a
			 * completion was requested but never signalled.
			 */
			if ((!(sts & CMD_STATUS_ISSUED)) ||
				((resp->msg->is_complete || cmd->complete) &&
				(!(sts & CMD_STATUS_COMPL))))
				resp->err = -EIO;
		}

		/* Check for response if this was a read request */
		if (resp->msg->is_read) {
			/* Respond the data back in the same req data */
			data = read_tcs_reg(base, TCS_DRV_CMD_RESP_DATA, m, 0);
			resp->msg->payload[0].data = data;
			mbox_chan_received_data(resp->chan, resp->msg);
		}

		trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
						resp->err);

		/* Clear the AMC mode for non-ACTIVE TCSes */
		if (tcs->type != ACTIVE_TCS) {
			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
			data &= ~TCS_AMC_MODE_ENABLE;
			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
		} else {
			/* Clear the enable bit for the commands */
			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
		}

		/* Clear the TCS IRQ status */
		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));

		/* Clean up response object and notify mbox in tasklet */
		send_tcs_response(resp);

		/* Notify the client that this request is completed. */
		atomic_set(&drv->tcs_in_use[m], 0);
	}

	return IRQ_HANDLED;
}
426
/*
 * mbox_notify_tx_done: trace and forward the final status to the
 * mailbox framework (mbox_chan_txdone unblocks the waiting client).
 */
static inline void mbox_notify_tx_done(struct mbox_chan *chan,
				struct tcs_mbox_msg *msg, int m, int err)
{
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);

	trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
	mbox_chan_txdone(chan, err);
}

/**
 * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
 *
 * Tasklet body: snapshot the response fields, notify the client, then
 * return the response object to the pool.
 */
static void tcs_notify_tx_done(unsigned long data)
{
	struct tcs_response *resp = (struct tcs_response *) data;
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	int err = resp->err;
	int m = resp->m;

	mbox_notify_tx_done(chan, msg, m, err);
	free_response(resp);
}
450
/**
 * tcs_notify_timeout: TX Done for requests that do trigger TCS, but
 * we do not get a response IRQ back.
 *
 * Delayed-work body (process context). Decides whether the triggered
 * request is genuinely still stuck in the TCS (its addresses are still
 * programmed and the TCS is busy); if so, dumps diagnostics, clears the
 * stale IRQ state and frees the TCS for reuse. Either way the client
 * is completed with -ETIMEDOUT.
 */
static void tcs_notify_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tcs_response *resp = container_of(dwork,
					struct tcs_response, dwork);
	struct mbox_chan *chan = resp->chan;
	struct tcs_mbox_msg *msg = resp->msg;
	struct tcs_drv *drv = resp->drv;
	int m = resp->m;
	u32 irq_status;
	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
	bool pending = false;
	int sent_count, irq_count;
	int i;

	/* Read while holding a lock, to get a consistent state snapshot */
	spin_lock(&tcs->tcs_lock);
	irq_status = read_tcs_reg(drv->reg_base, TCS_DRV_IRQ_STATUS, 0, 0);
	sent_count = atomic_read(&drv->tcs_send_count[m]);
	irq_count = atomic_read(&drv->tcs_irq_count[m]);

	if (!tcs_is_free(drv, m)) {
		struct tcs_cmd *cmd;
		u32 addr;

		/* Our request is pending iff its addresses are still loaded */
		for (i = 0; i < msg->num_payload; i++) {
			cmd = &msg->payload[i];
			addr = read_tcs_reg(drv->reg_base, TCS_DRV_CMD_ADDR,
						m, i);
			pending |= (cmd->addr == addr);
		}
	}
	spin_unlock(&tcs->tcs_lock);

	if (pending) {
		pr_err("TCS-%d waiting for response. (sent=%d recvd=%d ctrlr-sts=0x%x)\n",
			m, sent_count, irq_count, irq_status & (u32)BIT(m));
		for (i = 0; i < msg->num_payload; i++)
			pr_err("Addr: 0x%x Data: 0x%x\n",
					msg->payload[i].addr,
					msg->payload[i].data);
		/*
		 * In case the RPMH resource fails to respond to the
		 * completion request, the TCS would be blocked forever
		 * waiting on the response. There is no way to recover
		 * from such a case. But WARN() to investigate any false
		 * positives.
		 */
		WARN_ON(irq_status & BIT(m));

		/* Clear the TCS status register so we could try again */
		write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));

		/* Increment the response count, so it doesn't keep adding up */
		atomic_inc(&drv->tcs_irq_count[m]);

		/*
		 * If the request was fire-n-forget then the controller,
		 * then our controller is OK, but the accelerator may be
		 * in a bad state.
		 * Let the upper layers figure out what needs to be done
		 * in such a case. Return error code and carry on.
		 */
		atomic_set(&drv->tcs_in_use[m], 0);
	}

	/* m = -1: the notification is for a timeout, not a specific TCS */
	mbox_notify_tx_done(chan, msg, -1, -ETIMEDOUT);
	free_response(resp);
}
524
/*
 * __tcs_buffer_write: program a message into command slots [n, n+len)
 * of TCS @m and, for AMC (active) requests, trigger it.
 *
 * Caller must hold tcs->tcs_lock and have verified the slots are free.
 * The trigger sequence (clear control, enable, then enable+trigger,
 * each with read-back sync) is a HW requirement — do not reorder.
 *
 * NOTE(review): @d (drv_id) is accepted but unused here — confirm
 * whether it is kept only for signature symmetry with
 * __tcs_write_hidden().
 */
static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
			struct tcs_mbox_msg *msg, bool trigger)
{
	u32 msgid, cmd_msgid = 0;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	u32 enable = TCS_AMC_MODE_ENABLE;
	struct tcs_cmd *cmd;
	int i;
	void __iomem *base = drv->reg_base;

	/* We have homologous command set i.e pure read or write, not a mix */
	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= (msg->is_complete) ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= (!msg->is_read) ? CMD_MSGID_WRITE : 0;

	/* Read the send-after-prev complete flag for those already in TCS */
	cmd_complete = read_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0);

	for (i = 0; i < msg->num_payload; i++) {
		cmd = &msg->payload[i];
		cmd_enable |= BIT(n + i);
		cmd_complete |= cmd->complete << (n + i);
		/* Per-command completion request overrides the batch flag */
		msgid = cmd_msgid;
		msgid |= (cmd->complete) ? CMD_MSGID_RESP_REQ : 0;
		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
		trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
					cmd->data, cmd->complete, trigger);
	}

	/* Write the send-after-prev completion bits for the batch */
	write_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0, cmd_complete);

	/* Enable the new commands in TCS */
	cmd_enable |= read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);

	if (trigger) {
		/* HW req: Clear the DRV_CONTROL and enable TCS again */
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
		/* Enable the AMC mode on the TCS */
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
	}
}
573
Lina Iyer0d81e942016-05-26 11:18:23 -0600574/**
575 * tcs_drv_is_idle: Check if any of the AMCs are busy.
576 *
577 * @mbox: The mailbox controller.
578 *
579 * Returns true if the AMCs are not engaged or absent.
580 */
581static bool tcs_drv_is_idle(struct mbox_controller *mbox)
582{
583 int m;
584 struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
585 struct tcs_mbox *tcs = get_tcs_of_type(drv, ACTIVE_TCS);
586
Lina Iyer21c17882016-09-22 11:05:51 -0600587 /* Check for WAKE TCS if there are no ACTIVE TCS */
Lina Iyer0d81e942016-05-26 11:18:23 -0600588 if (IS_ERR(tcs))
Lina Iyer21c17882016-09-22 11:05:51 -0600589 tcs = get_tcs_of_type(drv, WAKE_TCS);
Lina Iyer0d81e942016-05-26 11:18:23 -0600590
591 for (m = tcs->tcs_offset; m < tcs->tcs_offset + tcs->num_tcs; m++)
Lina Iyerb68814f2017-04-14 12:49:07 -0600592 if (!tcs_is_free(drv, m))
Lina Iyer0d81e942016-05-26 11:18:23 -0600593 return false;
594
595 return true;
596}
597
Lina Iyer88a8fda2016-04-01 08:23:31 -0600598static void wait_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
599 struct tcs_mbox_msg *msg)
600{
601 u32 curr_enabled;
602 int i, j, k;
603 bool is_free;
604
605 do {
606 is_free = true;
607 for (i = 1; i > tcs->tcs_mask; i = i << 1) {
608 if (!(tcs->tcs_mask & i))
609 continue;
Lina Iyerb68814f2017-04-14 12:49:07 -0600610 if (tcs_is_free(drv, i))
Lina Iyer88a8fda2016-04-01 08:23:31 -0600611 continue;
612 curr_enabled = read_tcs_reg(drv->reg_base,
613 TCS_DRV_CMD_ENABLE, i, 0);
614 for (j = 0; j < msg->num_payload; j++) {
615 for (k = 0; k < curr_enabled; k++) {
616 if (!(curr_enabled & BIT(k)))
617 continue;
618 if (tcs->cmd_addr[k] ==
619 msg->payload[j].addr) {
620 is_free = false;
621 goto retry;
622 }
623 }
624 }
625 }
626retry:
627 if (!is_free)
Lina Iyer8bb7d5a2017-04-20 09:50:41 -0600628 udelay(1);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600629 } while (!is_free);
630}
631
/*
 * find_free_tcs: busy-wait for a free AMC in this (ACTIVE) TCS group
 * and return the first command-slot index of that TCS.
 *
 * Spins (udelay between full scans) until one frees up; warns if this
 * happens in hard-IRQ context while the group is busy, since blocking
 * there can deadlock against the completion interrupt.
 */
static int find_free_tcs(struct tcs_mbox *tcs)
{
	int slot, m = 0;
	u32 irq_status;

	/* Loop until we find a free AMC */
	do {
		if (tcs_is_free(tcs->drv, tcs->tcs_offset + m)) {
			slot = m * tcs->ncpt;
			break;
		}
		if (++m >= tcs->num_tcs) {
			/* Scanned every TCS of the group; back off and retry */
			m = 0;
			irq_status = read_tcs_reg(tcs->drv->reg_base,
						TCS_DRV_IRQ_STATUS, 0, 0);
			WARN((irq_status & tcs->tcs_mask && in_irq()),
				"TCS busy. Request should not be made from hard IRQ context.");
			udelay(10);
		}
	} while (1);

	return slot;
}
655
656static int find_match(struct tcs_mbox *tcs, struct tcs_cmd *cmd, int len)
657{
658 bool found = false;
659 int i = 0, j;
660
661 /* Check for already cached commands */
662 while ((i = find_next_bit(tcs->slots, MAX_TCS_SLOTS, i)) <
663 MAX_TCS_SLOTS) {
664 if (tcs->cmd_addr[i] != cmd[0].addr) {
665 i++;
666 continue;
667 }
668 /* sanity check to ensure the seq is same */
669 for (j = 1; j < len; j++) {
670 WARN((tcs->cmd_addr[i + j] != cmd[j].addr),
671 "Message does not match previous sequence.\n");
672 return -EINVAL;
673 }
674 found = true;
675 break;
676 }
677
678 return found ? i : -1;
679}
680
/*
 * find_slots: pick the starting command slot for a message.
 *
 * ACTIVE requests get the first free AMC. Sleep/wake requests first
 * try to re-use an identical cached sequence; otherwise search for a
 * contiguous run of free slots that does not straddle a TCS boundary
 * (the do/while advances @n one TCS at a time and retries until the
 * run fits entirely before the boundary).
 *
 * Returns a slot index, or -ENOMEM when nothing fits.
 */
static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg)
{
	int slot;
	int n = 0;

	/* For active requests find the first free AMC. */
	if (tcs->type == ACTIVE_TCS)
		return find_free_tcs(tcs);

	/* Find if we already have the msg in our TCS */
	slot = find_match(tcs, msg->payload, msg->num_payload);
	if (slot >= 0)
		return slot;

	/* Do over, until we can fit the full payload in a TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						n, msg->num_payload, 0);
		if (slot == MAX_TCS_SLOTS)
			break;
		n += tcs->ncpt;
	} while (slot + msg->num_payload - 1 >= n);

	return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
}
706
Lina Iyer88a8fda2016-04-01 08:23:31 -0600707static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
708 bool trigger)
709{
710 const struct device *dev = chan->cl->dev;
711 struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
712 int d = drv->drv_id;
713 struct tcs_mbox *tcs;
714 int i, slot, offset, m, n;
Channagoud Kadabi075db3b2017-03-16 14:26:17 -0700715 struct tcs_response *resp = NULL;
Lina Iyer88a8fda2016-04-01 08:23:31 -0600716
717 tcs = get_tcs_for_msg(drv, msg);
718 if (IS_ERR(tcs))
719 return PTR_ERR(tcs);
720
Lina Iyerc8712ca2017-04-20 00:18:49 -0600721 if (trigger)
722 resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
723
Lina Iyer88a8fda2016-04-01 08:23:31 -0600724 /* Identify the sequential slots that we can write to */
725 spin_lock(&tcs->tcs_lock);
726 slot = find_slots(tcs, msg);
727 if (slot < 0) {
728 dev_err(dev, "No TCS slot found.\n");
729 spin_unlock(&tcs->tcs_lock);
Lina Iyerc8712ca2017-04-20 00:18:49 -0600730 if (resp)
731 free_response(resp);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600732 return slot;
733 }
Lina Iyerc8712ca2017-04-20 00:18:49 -0600734
Lina Iyer88a8fda2016-04-01 08:23:31 -0600735 /* Mark the slots as in-use, before we unlock */
736 if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
737 bitmap_set(tcs->slots, slot, msg->num_payload);
738
739 /* Copy the addresses of the resources over to the slots */
740 for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
741 tcs->cmd_addr[slot + i] = msg->payload[i].addr;
742
Lina Iyer88a8fda2016-04-01 08:23:31 -0600743 offset = slot / tcs->ncpt;
744 m = offset + tcs->tcs_offset;
745 n = slot % tcs->ncpt;
746
Lina Iyerc8712ca2017-04-20 00:18:49 -0600747 /* Block, if we have an address from the msg in flight */
Lina Iyer88a8fda2016-04-01 08:23:31 -0600748 if (trigger) {
Lina Iyerc8712ca2017-04-20 00:18:49 -0600749 resp->m = m;
750 /* Mark the TCS as busy */
751 atomic_set(&drv->tcs_in_use[m], 1);
Lina Iyer088ccec2017-04-24 20:18:48 -0600752 atomic_inc(&drv->tcs_send_count[m]);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600753 wait_for_req_inflight(drv, tcs, msg);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600754 }
755
756 /* Write to the TCS or AMC */
Lina Iyer884981e2017-03-21 13:43:05 -0600757 __tcs_buffer_write(drv, d, m, n, msg, trigger);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600758
759 /* Schedule a timeout response, incase there is no actual response */
760 if (trigger)
761 schedule_tcs_err_response(resp);
762
Lina Iyerc8712ca2017-04-20 00:18:49 -0600763 spin_unlock(&tcs->tcs_lock);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600764
765 return 0;
766}
767
/* Drop all commands of TCS @m by clearing its command-enable bits. */
static void __tcs_buffer_invalidate(void __iomem *base, int m)
{
	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
}

/*
 * tcs_mbox_invalidate: clear all cached WAKE and SLEEP requests.
 *
 * For each TCS of both types: busy-wait until the TCS is idle, wipe its
 * command enables, then free the group's slot bitmap. Called before
 * re-purposing the wake TCS and on explicit invalidate requests.
 */
static int tcs_mbox_invalidate(struct mbox_chan *chan)
{
	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
	struct tcs_mbox *tcs;
	int m, i;
	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
	int type = 0;

	do {
		tcs = get_tcs_of_type(drv, inv_types[type]);
		if (IS_ERR(tcs))
			return PTR_ERR(tcs);

		spin_lock(&tcs->tcs_lock);
		for (i = 0; i < tcs->num_tcs; i++) {
			m = i + tcs->tcs_offset;
			/* Wait out any in-flight use of this TCS */
			while (!tcs_is_free(drv, m))
				udelay(1);
			__tcs_buffer_invalidate(drv->reg_base, m);
		}
		/* Mark the TCS as free */
		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
		spin_unlock(&tcs->tcs_lock);
	} while (++type < ARRAY_SIZE(inv_types));

	return 0;
}
800
Lina Iyer88a8fda2016-04-01 08:23:31 -0600801/**
802 * chan_tcs_write: Validate the incoming message and write to the
803 * appropriate TCS block.
804 *
805 * @chan: the MBOX channel
806 * @data: the tcs_mbox_msg*
807 *
808 * Returns a negative error for invalid message structure and invalid
809 * message combination, -EBUSY if there is an other active request for
810 * the channel in process, otherwise bubbles up internal error.
811 */
812static int chan_tcs_write(struct mbox_chan *chan, void *data)
813{
814 struct tcs_mbox_msg *msg = data;
815 const struct device *dev = chan->cl->dev;
816 int ret = -EINVAL;
817
818 if (!msg) {
819 dev_err(dev, "Payload error.\n");
820 goto tx_fail;
821 }
822
823 if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
824 dev_err(dev, "Payload error.\n");
825 goto tx_fail;
826 }
827
828 if (msg->invalidate || msg->is_control) {
829 dev_err(dev, "Incorrect API.\n");
830 goto tx_fail;
831 }
832
Lina Iyer21c17882016-09-22 11:05:51 -0600833 if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
834 msg->state != RPMH_AWAKE_STATE) {
Lina Iyer88a8fda2016-04-01 08:23:31 -0600835 dev_err(dev, "Incorrect API.\n");
836 goto tx_fail;
837 }
838
839 /* Read requests should always be single */
840 if (msg->is_read && msg->num_payload > 1) {
841 dev_err(dev, "Incorrect read request.\n");
842 goto tx_fail;
843 }
844
Lina Iyer7846e212017-03-22 10:35:53 -0600845 /*
846 * Since we are re-purposing the wake TCS, invalidate previous
847 * contents to avoid confusion.
848 */
849 if (msg->state == RPMH_AWAKE_STATE)
850 tcs_mbox_invalidate(chan);
851
Lina Iyer88a8fda2016-04-01 08:23:31 -0600852 /* Post the message to the TCS and trigger */
853 ret = tcs_mbox_write(chan, msg, true);
854
855tx_fail:
856 if (ret) {
857 struct tcs_drv *drv = container_of(chan->mbox,
Lina Iyerc8712ca2017-04-20 00:18:49 -0600858 struct tcs_drv, mbox);
859 struct tcs_response *resp = setup_response(
860 drv, msg, chan, TCS_M_INIT, ret);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600861
862 dev_err(dev, "Error sending RPMH message %d\n", ret);
863 send_tcs_response(resp);
864 }
865
866 return 0;
867}
868
/*
 * __tcs_write_hidden: write control data into the hidden/control TCS.
 * Only the DATA registers of the hidden TCS are writable; addresses
 * are fixed by HW. Caller holds the control TCS's tcs_lock.
 *
 * NOTE(review): @d (drv_id) is unused here — confirm it is only kept
 * for signature symmetry.
 */
static void __tcs_write_hidden(struct tcs_drv *drv, int d,
				struct tcs_mbox_msg *msg)
{
	int i;
	void __iomem *addr = drv->base + TCS_HIDDEN_CMD0_DRV_DATA;

	for (i = 0; i < msg->num_payload; i++) {
		/* Only data is write capable */
		writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
		trace_rpmh_control_msg(drv->name, msg->payload[i].data);
		addr += TCS_HIDDEN_CMD_SHIFT;
	}
}
882
883static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
884{
885 const struct device *dev = chan->cl->dev;
886 struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
887 struct tcs_mbox *tcs;
888
889 tcs = get_tcs_of_type(drv, CONTROL_TCS);
890 if (IS_ERR(tcs))
891 return PTR_ERR(tcs);
892
893 if (msg->num_payload != tcs->ncpt) {
894 dev_err(dev, "Request must fit the control TCS size.\n");
895 return -EINVAL;
896 }
897
898 spin_lock(&tcs->tcs_lock);
Lina Iyer884981e2017-03-21 13:43:05 -0600899 __tcs_write_hidden(tcs->drv, drv->drv_id, msg);
Lina Iyer88a8fda2016-04-01 08:23:31 -0600900 spin_unlock(&tcs->tcs_lock);
901
902 return 0;
903}
904
905/**
906 * chan_tcs_ctrl_write: Write message to the controller, no ACK sent.
907 *
908 * @chan: the MBOX channel
909 * @data: the tcs_mbox_msg*
910 */
911static int chan_tcs_ctrl_write(struct mbox_chan *chan, void *data)
912{
913 struct tcs_mbox_msg *msg = data;
914 const struct device *dev = chan->cl->dev;
915 int ret = -EINVAL;
916
917 if (!msg) {
918 dev_err(dev, "Payload error.\n");
919 goto tx_done;
920 }
921
922 if (msg->num_payload > MAX_RPMH_PAYLOAD) {
923 dev_err(dev, "Payload error.\n");
924 goto tx_done;
925 }
926
927 /* Invalidate sleep/wake TCS */
928 if (msg->invalidate) {
929 ret = tcs_mbox_invalidate(chan);
930 goto tx_done;
931 }
932
933 /* Control slots are unique. They carry specific data. */
934 if (msg->is_control) {
935 ret = tcs_control_write(chan, msg);
936 goto tx_done;
937 }
938
Lina Iyer88a8fda2016-04-01 08:23:31 -0600939 /* Post the message to the TCS without trigger */
940 ret = tcs_mbox_write(chan, msg, false);
941
942tx_done:
943 return ret;
944}
945
/* Channel startup callback: no per-channel setup is required */
static int chan_init(struct mbox_chan *chan)
{
	return 0;
}
950
/* Channel shutdown callback: no per-channel teardown is required */
static void chan_shutdown(struct mbox_chan *chan)
{ }
953
/*
 * Mailbox framework callbacks for this controller.
 * send_data posts and triggers a TCS (tx-done reported via IRQ; probe
 * sets txdone_irq); send_controller_data posts without triggering.
 */
static const struct mbox_chan_ops mbox_ops = {
	.send_data = chan_tcs_write,
	.send_controller_data = chan_tcs_ctrl_write,
	.startup = chan_init,
	.shutdown = chan_shutdown,
};
960
961static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
962 const struct of_phandle_args *sp)
963{
964 struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
965 struct mbox_chan *chan;
966
967 if (drv->num_assigned >= mbox->num_chans) {
968 pr_err("TCS-Mbox out of channel memory\n");
969 return ERR_PTR(-ENOMEM);
970 }
971
972 chan = &mbox->chans[drv->num_assigned++];
973
974 return chan;
975}
976
977static int tcs_drv_probe(struct platform_device *pdev)
978{
979 struct device_node *dn = pdev->dev.of_node;
980 struct device_node *np;
981 struct tcs_drv *drv;
982 struct mbox_chan *chans;
983 struct tcs_mbox *tcs;
984 struct of_phandle_args p;
985 int irq;
986 u32 val[8] = { 0 };
987 int num_chans = 0;
988 int st = 0;
989 int i, j, ret, nelem;
990 u32 config, max_tcs, ncpt;
Lina Iyer73101422017-02-16 14:09:25 -0700991 int tcs_type_count[TCS_TYPE_NR] = { 0 };
992 struct resource *res;
Lina Iyer7846e212017-03-22 10:35:53 -0600993 u32 irq_mask;
Lina Iyer88a8fda2016-04-01 08:23:31 -0600994
995 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
996 if (!drv)
997 return -ENOMEM;
998
Lina Iyer73101422017-02-16 14:09:25 -0700999 ret = of_property_read_u32(dn, "qcom,drv-id", &drv->drv_id);
1000 if (ret)
1001 return ret;
Lina Iyer88a8fda2016-04-01 08:23:31 -06001002
Lina Iyer73101422017-02-16 14:09:25 -07001003 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1004 if (!res)
1005 return -EINVAL;
1006 drv->base = devm_ioremap_resource(&pdev->dev, res);
Lina Iyer88a8fda2016-04-01 08:23:31 -06001007 if (IS_ERR(drv->base))
1008 return PTR_ERR(drv->base);
1009
Lina Iyer73101422017-02-16 14:09:25 -07001010 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1011 if (!res)
1012 return -EINVAL;
1013 drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
Lina Iyer88a8fda2016-04-01 08:23:31 -06001014 if (IS_ERR(drv->reg_base))
1015 return PTR_ERR(drv->reg_base);
1016
1017 config = read_drv_config(drv->base);
1018 max_tcs = config & (DRV_NUM_TCS_MASK <<
1019 (DRV_NUM_TCS_SHIFT * drv->drv_id));
1020 max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->drv_id);
1021 ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
1022 ncpt = ncpt >> DRV_NCPT_SHIFT;
1023
1024 nelem = of_property_count_elems_of_size(dn, "qcom,tcs-config",
1025 sizeof(u32));
1026 if (!nelem || (nelem % 2) || (nelem > 2 * TCS_TYPE_NR))
1027 return -EINVAL;
1028
1029 ret = of_property_read_u32_array(dn, "qcom,tcs-config", val, nelem);
1030 if (ret)
1031 return ret;
1032
Lina Iyer73101422017-02-16 14:09:25 -07001033 /* Ensure we have exactly not more than one of each type in DT */
Lina Iyer88a8fda2016-04-01 08:23:31 -06001034 for (i = 0; i < (nelem / 2); i++) {
Lina Iyer73101422017-02-16 14:09:25 -07001035 if (val[2 * i] >= TCS_TYPE_NR)
1036 return -EINVAL;
1037 tcs_type_count[val[2 * i]]++;
1038 if (tcs_type_count[val[2 * i]] > 1)
1039 return -EINVAL;
1040 }
1041
1042 /* Ensure we have each type specified in DT */
1043 for (i = 0; i < ARRAY_SIZE(tcs_type_count); i++)
1044 if (!tcs_type_count[i])
1045 return -EINVAL;
1046
1047 for (i = 0; i < (nelem / 2); i++) {
1048 tcs = &drv->tcs[val[2 * i]];
Lina Iyer88a8fda2016-04-01 08:23:31 -06001049 tcs->drv = drv;
1050 tcs->type = val[2 * i];
1051 tcs->num_tcs = val[2 * i + 1];
1052 tcs->ncpt = (tcs->type == CONTROL_TCS) ? TCS_HIDDEN_MAX_SLOTS
1053 : ncpt;
1054 spin_lock_init(&tcs->tcs_lock);
1055
1056 if (tcs->num_tcs <= 0 || tcs->type == CONTROL_TCS)
1057 continue;
1058
1059 if (tcs->num_tcs > MAX_TCS_PER_TYPE)
1060 return -EINVAL;
1061
1062 if (st > max_tcs)
1063 return -EINVAL;
1064
1065 tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
1066 tcs->tcs_offset = st;
1067 st += tcs->num_tcs;
1068
1069 tcs->cmd_addr = devm_kzalloc(&pdev->dev, sizeof(u32) *
1070 tcs->num_tcs * tcs->ncpt, GFP_KERNEL);
1071 if (!tcs->cmd_addr)
1072 return -ENOMEM;
1073
Lina Iyer88a8fda2016-04-01 08:23:31 -06001074 }
1075
1076 /* Allocate only that many channels specified in DT for our MBOX */
1077 for_each_node_with_property(np, "mboxes") {
1078 if (!of_device_is_available(np))
1079 continue;
1080 i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
1081 for (j = 0; j < i; j++) {
1082 ret = of_parse_phandle_with_args(np, "mboxes",
1083 "#mbox-cells", j, &p);
1084 if (!ret && p.np == pdev->dev.of_node)
1085 break;
1086 }
1087 num_chans++;
1088 }
1089
1090 if (!num_chans) {
1091 pr_err("%s: No clients for controller (%s)\n", __func__,
1092 dn->full_name);
1093 return -ENODEV;
1094 }
1095
1096 chans = devm_kzalloc(&pdev->dev, num_chans * sizeof(*chans),
1097 GFP_KERNEL);
1098 if (!chans)
1099 return -ENOMEM;
1100
1101 for (i = 0; i < num_chans; i++) {
1102 chans[i].mbox = &drv->mbox;
1103 chans[i].txdone_method = TXDONE_BY_IRQ;
1104 }
1105
1106 drv->mbox.dev = &pdev->dev;
1107 drv->mbox.ops = &mbox_ops;
1108 drv->mbox.chans = chans;
1109 drv->mbox.num_chans = num_chans;
1110 drv->mbox.txdone_irq = true;
1111 drv->mbox.of_xlate = of_tcs_mbox_xlate;
Lina Iyer0d81e942016-05-26 11:18:23 -06001112 drv->mbox.is_idle = tcs_drv_is_idle;
Lina Iyer88a8fda2016-04-01 08:23:31 -06001113 drv->num_tcs = st;
1114 drv->pdev = pdev;
1115
Lina Iyer884981e2017-03-21 13:43:05 -06001116 drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
1117 if (!drv->name)
1118 drv->name = dev_name(&pdev->dev);
1119
Lina Iyer88a8fda2016-04-01 08:23:31 -06001120 ret = tcs_response_pool_init(drv);
1121 if (ret)
1122 return ret;
1123
1124 irq = of_irq_get(dn, 0);
1125 if (irq < 0)
1126 return irq;
1127
Lina Iyerafcdc182017-04-19 18:31:04 -06001128 ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
Lina Iyer88a8fda2016-04-01 08:23:31 -06001129 IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
1130 "tcs_irq", drv);
1131 if (ret)
1132 return ret;
1133
Lina Iyer7846e212017-03-22 10:35:53 -06001134 /*
1135 * Enable interrupts for AMC TCS,
1136 * if there are no AMC TCS, use wake TCS.
1137 */
1138 irq_mask = (drv->tcs[ACTIVE_TCS].num_tcs) ?
1139 drv->tcs[ACTIVE_TCS].tcs_mask :
1140 drv->tcs[WAKE_TCS].tcs_mask;
1141 write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0, irq_mask);
Lina Iyer88a8fda2016-04-01 08:23:31 -06001142
Lina Iyerb68814f2017-04-14 12:49:07 -06001143 for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
1144 atomic_set(&drv->tcs_in_use[i], 0);
1145
Lina Iyer88a8fda2016-04-01 08:23:31 -06001146 ret = mbox_controller_register(&drv->mbox);
1147 if (ret)
1148 return ret;
1149
1150 pr_debug("Mailbox controller (%s, drv=%d) registered\n",
1151 dn->full_name, drv->drv_id);
1152
1153 return 0;
1154}
1155
/* Device tree compatibles handled by this driver */
static const struct of_device_id tcs_drv_match[] = {
	{ .compatible = "qcom,tcs-drv", },
	{ }
};
1160
/* Platform driver glue: binds tcs_drv_probe to matching DT nodes */
static struct platform_driver tcs_mbox_driver = {
	.probe = tcs_drv_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = tcs_drv_match,
	},
};
1168
/*
 * Registered at arch_initcall level (earlier than module_init),
 * presumably so client drivers can find the mailbox at their own
 * probe time — confirm against consumers.
 */
static int __init tcs_mbox_driver_init(void)
{
	return platform_driver_register(&tcs_mbox_driver);
}
arch_initcall(tcs_mbox_driver_init);