/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
#include <soc/qcom/cmd-db.h>

#define RPMH_MAX_MBOXES		2
#define RPMH_MAX_FAST_RES	32
#define RPMH_MAX_REQ_IN_BATCH	10

#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name) \
	struct rpmh_msg name = { \
		.msg = { 0 }, \
		.msg.state = s, \
		.msg.is_complete = true, \
		.msg.payload = name.cmd, \
		.msg.num_payload = 0, \
		.cmd = { { 0 } }, \
		.waitq = q, \
		.wait_count = c, \
		.rc = rc, \
		.bit = -1, \
	}

struct rpmh_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

struct rpmh_msg {
	struct tcs_mbox_msg msg;
	struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
	wait_queue_head_t *waitq;
	atomic_t *wait_count;
	struct rpmh_client *rc;
	int bit;
	int err; /* relay error from mbox for sync calls */
};

struct rpmh_mbox {
	struct device_node *mbox_dn;
	struct list_head resources;
	spinlock_t lock;
	struct rpmh_msg *msg_pool;
	DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
	bool dirty;
	bool in_solver_mode;
};

struct rpmh_client {
	struct device *dev;
	struct mbox_client client;
	struct mbox_chan *chan;
	struct rpmh_mbox *rpmh;
};

static struct rpmh_mbox mbox_ctrlr[RPMH_MAX_MBOXES];
DEFINE_MUTEX(rpmh_mbox_mutex);
bool rpmh_standalone;

static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	struct rpmh_msg *msg = NULL;
	int pos;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	pos = find_first_zero_bit(rpm->fast_req, RPMH_MAX_FAST_RES);
	if (pos != RPMH_MAX_FAST_RES) {
		bitmap_set(rpm->fast_req, pos, 1);
		msg = &rpm->msg_pool[pos];
		memset(msg, 0, sizeof(*msg));
		msg->bit = pos;
		msg->rc = rc;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	return msg;
}

static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
{
	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);

	atomic_dec(rpm_msg->wait_count);
	wake_up(rpm_msg->waitq);
}

static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
{
	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
	atomic_t *wc = rpm_msg->wait_count;
	wait_queue_head_t *waitq = rpm_msg->waitq;
	unsigned long flags;

	rpm_msg->err = r;

	if (r) {
		dev_err(rpm_msg->rc->dev,
			"RPMH TX fail in msg addr 0x%x, err=%d\n",
			rpm_msg->msg.payload[0].addr, r);
		/*
		 * If TX fails for a read request, we will not get an
		 * rx_callback from the mailbox. Force the rx_cb here
		 * so waiters are still released.
		 */
		if (rpm_msg->msg.is_read)
			rpmh_rx_cb(cl, msg);
	}

	/*
	 * Copy the child object pointers (waitq/wait_count) before freeing
	 * the parent. That way, even if the parent (rpm_msg) object gets
	 * reused, the child objects can be released in parallel.
	 * If the children were freed before the parent, a stack-allocated
	 * parent object could become invalid before we can check its
	 * ->bit value.
	 */

	/* If the message came from the pool, mark its slot available again */
	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
		spin_lock_irqsave(&rpm->lock, flags);
		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
		spin_unlock_irqrestore(&rpm->lock, flags);
	}

	/* Signal the blocking thread we are done */
	if (wc && atomic_dec_and_test(wc))
		if (waitq)
			wake_up(waitq);
}

static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
{
	struct rpmh_req *p, *req = NULL;

	list_for_each_entry(p, &rc->rpmh->resources, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc,
			enum rpmh_state state, struct tcs_cmd *cmd)
{
	struct rpmh_req *req;
	struct rpmh_mbox *rpm = rc->rpmh;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	req = __find_req(rc, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	INIT_LIST_HEAD(&req->list);
	list_add_tail(&req->list, &rpm->resources);

existing:
	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_AWAKE_STATE:
		if (req->sleep_val != UINT_MAX)
			req->wake_val = cmd->data;
		break;
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	default:
		break;
	}

unlock:
	rpm->dirty = true;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return req;
}
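
/*
 * Illustrative sketch (not in the original driver) of how the cache above
 * behaves; the address and data values are assumptions. Two writes to the
 * same address under different states leave one rpmh_req with both values
 * populated:
 *
 *	rpmh_write_single(rc, RPMH_SLEEP_STATE, 0x30000, 0x0);
 *	rpmh_write_single(rc, RPMH_ACTIVE_ONLY_STATE, 0x30000, 0x1);
 *
 * After this, the cached rpmh_req for 0x30000 has sleep_val == 0x0 and,
 * because a sleep value already exists, the active write also sets
 * wake_val == 0x1; rpmh_flush() later writes both values to the TCS.
 */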

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @rc: The RPMH client
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (payload).
 *
 * Cache the RPMH request and send it to the controller only if the state
 * is ACTIVE_ONLY or AWAKE. SLEEP/WAKE_ONLY requests are not sent to the
 * controller at this time. Use rpmh_flush() to send them to the
 * controller.
 */
int __rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
			struct rpmh_msg *rpm_msg)
{
	struct rpmh_req *req;
	int ret = 0;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_payload; i++) {
		req = cache_rpm_request(rc, state, &rpm_msg->msg.payload[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	rpm_msg->msg.state = state;

	/* Send to mailbox only if active or awake */
	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		ret = mbox_send_message(rc->chan, &rpm_msg->msg);
		if (ret > 0)
			ret = 0;
	} else {
		/* Clean up our call by spoofing tx_done */
		rpmh_tx_done(&rc->client, &rpm_msg->msg, ret);
	}

	return ret;
}

/**
 * rpmh_write_single_async: Write a single RPMH command
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @addr: The ePCB address
 * @data: The data
 *
 * Write a single value in the fast path. Fire and forget.
 * May be called from atomic contexts.
 */
int rpmh_write_single_async(struct rpmh_client *rc, enum rpmh_state state,
			u32 addr, u32 data)
{
	struct rpmh_msg *rpm_msg;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm_msg = get_msg_from_pool(rc);
	if (!rpm_msg)
		return -ENOMEM;

	rpm_msg->cmd[0].addr = addr;
	rpm_msg->cmd[0].data = data;

	rpm_msg->msg.payload = rpm_msg->cmd;
	rpm_msg->msg.num_payload = 1;

	return __rpmh_write(rc, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_single_async);
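
/*
 * Illustrative usage sketch (not part of this driver); the handle "rc",
 * the address and the data value are assumptions:
 *
 *	ret = rpmh_write_single_async(rc, RPMH_ACTIVE_ONLY_STATE,
 *				      0x30000, 0x1);
 *
 * The call returns once the request is queued with the mailbox; completion
 * is not awaited, which is why it is usable from atomic context.
 */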

/**
 * rpmh_write_single: Write a single RPMH command and
 * wait for completion of the command.
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @addr: The ePCB address
 * @data: The data
 *
 * Write a single value in the slow path and wait for the request to
 * complete. Blocks until the request is completed on the accelerator.
 * Do not call from atomic contexts.
 */
int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
			u32 addr, u32 data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
	atomic_t wait_count = ATOMIC_INIT(1);
	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
	int ret;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_payload = 1;

	ret = __rpmh_write(rc, state, &rpm_msg);
	if (ret < 0)
		return ret;

	wait_event(waitq, atomic_read(&wait_count) == 0);

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write_single);
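
/*
 * Illustrative usage sketch (not part of this driver); the handle and the
 * address/data values are assumptions:
 *
 *	ret = rpmh_write_single(rc, RPMH_ACTIVE_ONLY_STATE, 0x30000, 0x1);
 *	if (ret)
 *		pr_err("active vote failed: %d\n", ret);
 *
 * Unlike the async variant, this blocks on the on-stack wait queue until
 * rpmh_tx_done() drops wait_count to zero.
 */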

struct rpmh_msg *__get_rpmh_msg_async(struct rpmh_client *rc,
		enum rpmh_state state, struct tcs_cmd *cmd, int n)
{
	struct rpmh_msg *rpm_msg;

	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return ERR_PTR(-EINVAL);

	rpm_msg = get_msg_from_pool(rc);
	if (!rpm_msg)
		return ERR_PTR(-ENOMEM);

	memcpy(rpm_msg->cmd, cmd, n * sizeof(*cmd));

	rpm_msg->msg.state = state;
	rpm_msg->msg.payload = rpm_msg->cmd;
	rpm_msg->msg.num_payload = n;

	return rpm_msg;
}

/**
 * rpmh_write_async: Write a batch of RPMH commands
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a batch of RPMH commands; their order is maintained and they are
 * sent as a single shot. By default the entire set of commands is
 * considered active-only (i.e., it will not be cached in the wake set
 * unless each command has a corresponding sleep request).
 */
int rpmh_write_async(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int n)
{
	struct rpmh_msg *rpm_msg;

	if (rpmh_standalone)
		return 0;

	rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n);
	if (IS_ERR(rpm_msg))
		return PTR_ERR(rpm_msg);

	return __rpmh_write(rc, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
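
/*
 * Illustrative usage sketch (not part of this driver); the addresses and
 * values in the command array are assumptions:
 *
 *	struct tcs_cmd cmd[2] = {
 *		{ .addr = 0x30000, .data = 0x1 },
 *		{ .addr = 0x30010, .data = 0x2 },
 *	};
 *
 *	ret = rpmh_write_async(rc, RPMH_ACTIVE_ONLY_STATE, cmd, 2);
 *
 * The batch is sent as one message; the pooled rpmh_msg is released by
 * rpmh_tx_done(), so the caller does not wait for completion.
 */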

/**
 * rpmh_write: Write a batch of RPMH commands
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a batch of RPMH commands; their order is maintained and they are
 * sent as a single shot. By default the entire set of commands is
 * considered active-only (i.e., it will not be cached in the wake set
 * unless each command has a corresponding sleep request). All requests
 * are sent as slow-path requests.
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int n)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
	atomic_t wait_count = ATOMIC_INIT(1);
	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
	int ret;

	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_payload = n;

	ret = __rpmh_write(rc, state, &rpm_msg);
	if (ret)
		return ret;

	wait_event(waitq, atomic_read(&wait_count) == 0);

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);
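
/*
 * Illustrative usage sketch (not part of this driver); addresses and
 * values are assumptions:
 *
 *	struct tcs_cmd cmd[2] = {
 *		{ .addr = 0x30000, .data = 0x0 },
 *		{ .addr = 0x30010, .data = 0x0 },
 *	};
 *
 *	ret = rpmh_write(rc, RPMH_SLEEP_STATE, cmd, 2);
 *
 * For SLEEP/WAKE states the commands are only cached here and written to
 * the TCS later by rpmh_flush(); the call still completes via the spoofed
 * tx_done in __rpmh_write(), so it does not block waiting on hardware.
 */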

/**
 * rpmh_write_passthru: Write multiple batches of RPMH commands without caching
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the mailbox controller without caching. If the request
 * state is ACTIVE or AWAKE, the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-and-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int *n)
{
	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
	int count = 0;
	int ret, i, j, k;
	bool complete_set;
	unsigned long flags;
	struct rpmh_mbox *rpm;

	if (IS_ERR_OR_NULL(rc) || !cmd || !n)
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	/* Do not allow setting wake votes when in solver mode */
	rpm = rc->rpmh;
	spin_lock_irqsave(&rpm->lock, flags);
	if (rpm->in_solver_mode && state == RPMH_WAKE_ONLY_STATE) {
		spin_unlock_irqrestore(&rpm->lock, flags);
		return -EIO;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	while (n[count++])
		;
	count--;
	if (!count || count >= RPMH_MAX_REQ_IN_BATCH)
		return -EINVAL;

	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		/*
		 * Ensure the 'complete' bit is set for at least one command
		 * in each set for active/awake requests.
		 */
		for (i = 0, k = 0; i < count; k += n[i], i++) {
			complete_set = false;
			for (j = 0; j < n[i]; j++) {
				if (cmd[k + j].complete) {
					complete_set = true;
					break;
				}
			}
			if (!complete_set) {
				dev_err(rc->dev, "No completion set for batch");
				return -EINVAL;
			}
		}
	}

	/* Create async request batches */
	for (i = 0; i < count; i++) {
		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
		if (IS_ERR_OR_NULL(rpm_msg[i])) {
			/* Clean up our call by spoofing tx_done */
			for (j = 0; j < i; j++)
				rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, 0);
			return PTR_ERR(rpm_msg[i]);
		}
		cmd += n[i];
	}

	/* Send if Active or Awake and wait for the whole set to complete */
	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		might_sleep();
		atomic_set(&wait_count, count);
		for (i = 0; i < count; i++) {
			rpm_msg[i]->waitq = &waitq;
			rpm_msg[i]->wait_count = &wait_count;
			/* Bypass caching and write to mailbox directly */
			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
			if (ret < 0) {
				pr_err("Error(%d) sending RPM message addr=0x%x\n",
					ret, rpm_msg[i]->msg.payload[0].addr);
				break;
			}
		}
		wait_event(waitq, atomic_read(&wait_count) == (count - i));
	} else {
		/* Send Sleep requests to the controller, expect no response */
		for (i = 0; i < count; i++) {
			rpm_msg[i]->waitq = NULL;
			ret = mbox_send_controller_data(rc->chan,
						&rpm_msg[i]->msg);
			/* Clean up our call by spoofing tx_done */
			rpmh_tx_done(&rc->client, &rpm_msg[i]->msg, ret);
		}
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(rpmh_write_passthru);
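
/*
 * Illustrative usage sketch (not part of this driver); addresses, values
 * and batch sizes are assumptions. Note the zero-terminated batch-count
 * array and the completion bit required in each batch for active/awake
 * sets:
 *
 *	struct tcs_cmd cmd[3] = {
 *		{ .addr = 0x30000, .data = 0x1, .complete = 1 },
 *		{ .addr = 0x30010, .data = 0x2 },
 *		{ .addr = 0x30020, .data = 0x3, .complete = 1 },
 *	};
 *	int n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_passthru(rc, RPMH_ACTIVE_ONLY_STATE, cmd, n);
 *
 * This sends two messages (commands 0-1 and command 2) and blocks until
 * both complete; nothing is cached for rpmh_flush().
 */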

/**
 * rpmh_mode_solver_set: Indicate that the RSC controller hardware has
 * been configured to be in solver mode
 *
 * @rc: The RPMH handle
 * @enable: Boolean value indicating if the controller is in solver mode.
 *
 * When solver mode is enabled, the passthru API cannot send wake votes,
 * only awake and active votes.
 */
int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
{
	struct rpmh_mbox *rpm;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm = rc->rpmh;
	do {
		spin_lock_irqsave(&rpm->lock, flags);
		if (mbox_controller_is_idle(rc->chan)) {
			rpm->in_solver_mode = enable;
			spin_unlock_irqrestore(&rpm->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&rpm->lock, flags);
		udelay(10);
	} while (1);

	return 0;
}
EXPORT_SYMBOL(rpmh_mode_solver_set);
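
/*
 * Illustrative usage sketch (not part of this driver); the scenario of a
 * client temporarily handing vote selection to hardware is an assumption:
 *
 *	rpmh_mode_solver_set(rc, true);
 *	(hardware now selects votes on its own)
 *	...
 *	rpmh_mode_solver_set(rc, false);
 *
 * While solver mode is enabled, rpmh_write_passthru() rejects
 * RPMH_WAKE_ONLY_STATE requests with -EIO, as checked above. The call
 * itself spins, with a 10us delay per attempt, until the controller
 * is idle.
 */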

/**
 * rpmh_write_control: Write async control commands to the controller
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write control commands to the controller. The messages are always sent
 * async.
 *
 * May be called from atomic contexts.
 */
int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);

	if (IS_ERR_OR_NULL(rc) || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_payload = n;
	rpm_msg.msg.is_control = true;
	rpm_msg.msg.is_complete = false;

	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
EXPORT_SYMBOL(rpmh_write_control);

/**
 * rpmh_invalidate: Invalidate all sleep and active sets
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 *
 * Invalidate the sleep and active values in the TCS blocks and mark the
 * cached requests as dirty.
 */
int rpmh_invalidate(struct rpmh_client *rc)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
	struct rpmh_mbox *rpm;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm = rc->rpmh;
	rpm_msg.msg.invalidate = true;
	rpm_msg.msg.is_complete = false;

	spin_lock_irqsave(&rpm->lock, flags);
	rpm->dirty = true;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
EXPORT_SYMBOL(rpmh_invalidate);

/**
 * rpmh_read: Read a resource value
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @addr: The ePCB address
 * @resp: The store for the response received from RPMH
 *
 * Read a resource value from RPMH.
 */
int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
{
	int ret;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
	atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
	DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
				&waitq, &wait_count, rpm_msg);

	if (IS_ERR_OR_NULL(rc) || !resp)
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = 0;
	rpm_msg.msg.num_payload = 1;

	rpm_msg.msg.is_read = true;

	ret = mbox_send_message(rc->chan, &rpm_msg.msg);
	if (ret < 0)
		return ret;

	/* Wait until the response is received from RPMH */
	wait_event(waitq, atomic_read(&wait_count) == 0);

	/* Read the data back from the tcs_mbox_msg structure */
	*resp = rpm_msg.cmd[0].data;

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_read);
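
/*
 * Illustrative usage sketch (not part of this driver); the address is an
 * assumption:
 *
 *	u32 val;
 *
 *	ret = rpmh_read(rc, 0x30000, &val);
 *	if (!ret)
 *		pr_info("resource value: %u\n", val);
 *
 * The wait_count of 2 above ensures both the rx callback (which carries
 * the data back) and tx_done have fired before the value is read out.
 */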

static inline int is_req_valid(struct rpmh_req *req)
{
	return (req->sleep_val != UINT_MAX && req->wake_val != UINT_MAX
			&& req->sleep_val != req->wake_val);
}

int send_single(struct rpmh_client *rc, enum rpmh_state state, u32 addr,
			u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, state, NULL, NULL, rpm_msg);

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_payload = 1;
	/* Sleep/wake sets are written to the TCS; no completion is expected */
	rpm_msg.msg.is_complete = false;

	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 *
 * This function is generally called from the sleep code by the last CPU
 * that is powering down the entire system.
 *
 * Returns -EBUSY if the controller is busy, probably waiting on a response
 * to a RPMH request sent earlier.
 */
int rpmh_flush(struct rpmh_client *rc)
{
	struct rpmh_req *p;
	struct rpmh_mbox *rpm;
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	if (!mbox_controller_is_idle(rc->chan))
		return -EBUSY;

	rpm = rc->rpmh;
	spin_lock_irqsave(&rpm->lock, flags);
	if (!rpm->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		spin_unlock_irqrestore(&rpm->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	/*
	 * Nobody else should be calling this function other than sleep,
	 * hence we can run without locks.
	 */
	list_for_each_entry(p, &rc->rpmh->resources, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:0x%x s:0x%x w:0x%x",
				__func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(rc, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(rc, RPMH_WAKE_ONLY_STATE, p->addr,
						p->wake_val);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&rpm->lock, flags);
	rpm->dirty = false;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);
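
/*
 * Illustrative usage sketch (not part of this driver); calling from a
 * platform's suspend path is an assumption:
 *
 *	ret = rpmh_flush(rc);
 *	if (ret == -EBUSY)
 *		return ret;
 *
 * -EBUSY means the controller still has requests in flight; the caller
 * would typically abort the low-power transition and retry later. Since
 * only the last CPU going down is expected to call this, the resource-list
 * walk runs without holding the rpmh_mbox lock.
 */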

/**
 * get_mbox: Get the MBOX controller
 * @pdev: the platform device
 * @name: the MBOX name as specified in DT for the device.
 * @index: the index in the mboxes property if name is not provided.
 *
 * Get the MBOX Device node. We will use that to know which
 * MBOX controller this platform device is intending to talk
 * to.
 */
static struct rpmh_mbox *get_mbox(struct platform_device *pdev,
			const char *name, int index)
{
	int i;
	struct property *prop;
	struct of_phandle_args spec;
	const char *mbox_name;
	struct rpmh_mbox *rpmh;

	if (index < 0) {
		if (!name || !name[0])
			return ERR_PTR(-EINVAL);
		index = 0;
		of_property_for_each_string(pdev->dev.of_node,
				"mbox-names", prop, mbox_name) {
			if (!strcmp(name, mbox_name))
				break;
			index++;
		}
	}

	if (of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
					"#mbox-cells", index, &spec)) {
		dev_dbg(&pdev->dev, "%s: can't parse mboxes property\n",
					__func__);
		return ERR_PTR(-ENODEV);
	}

	for (i = 0; i < RPMH_MAX_MBOXES; i++)
		if (mbox_ctrlr[i].mbox_dn == spec.np) {
			rpmh = &mbox_ctrlr[i];
			goto found;
		}

	/* A new MBOX */
	for (i = 0; i < RPMH_MAX_MBOXES; i++)
		if (!mbox_ctrlr[i].mbox_dn)
			break;

	/* More controllers than expected - not recoverable */
	if (WARN_ON(i == RPMH_MAX_MBOXES))
		return ERR_PTR(-EINVAL);

	rpmh = &mbox_ctrlr[i];

	rpmh->msg_pool = kzalloc(sizeof(struct rpmh_msg) *
				RPMH_MAX_FAST_RES, GFP_KERNEL);
	if (!rpmh->msg_pool)
		return ERR_PTR(-ENOMEM);

	rpmh->mbox_dn = spec.np;
	INIT_LIST_HEAD(&rpmh->resources);
	spin_lock_init(&rpmh->lock);

found:
	of_node_put(spec.np);

	return rpmh;
}

static struct rpmh_client *get_rpmh_client(struct platform_device *pdev,
				const char *name, int index)
{
	struct rpmh_client *rc;
	int ret = 0;

	ret = cmd_db_ready();
	if (ret)
		return ERR_PTR(ret);

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->client.rx_callback = rpmh_rx_cb;
	rc->client.tx_prepare = NULL;
	rc->client.tx_done = rpmh_tx_done;
	rc->client.tx_block = false;
	rc->client.knows_txdone = false;
	rc->client.dev = &pdev->dev;
	rc->dev = &pdev->dev;

	rc->chan = ERR_PTR(-EINVAL);

	/* Initialize by index or name, whichever is present */
	if (index >= 0)
		rc->chan = mbox_request_channel(&rc->client, index);
	else if (name)
		rc->chan = mbox_request_channel_byname(&rc->client, name);

	if (IS_ERR_OR_NULL(rc->chan)) {
		ret = PTR_ERR(rc->chan);
		goto cleanup;
	}

	mutex_lock(&rpmh_mbox_mutex);
	rc->rpmh = get_mbox(pdev, name, index);
	rpmh_standalone = (cmd_db_is_standalone() > 0);
	mutex_unlock(&rpmh_mbox_mutex);

	if (IS_ERR(rc->rpmh)) {
		ret = PTR_ERR(rc->rpmh);
		mbox_free_channel(rc->chan);
		goto cleanup;
	}

	return rc;

cleanup:
	kfree(rc);
	return ERR_PTR(ret);
}

/**
 * rpmh_get_byname: Get the RPMh handle by mbox name
 *
 * @pdev: the platform device which needs to communicate with RPM
 * accelerators
 * @name: The mbox-name assigned to the client's mailbox handle
 *
 * May sleep.
 */
struct rpmh_client *rpmh_get_byname(struct platform_device *pdev,
				const char *name)
{
	return get_rpmh_client(pdev, name, -1);
}
EXPORT_SYMBOL(rpmh_get_byname);

/**
 * rpmh_get_byindex: Get the RPMh handle by mbox index
 *
 * @pdev: the platform device which needs to communicate with RPM
 * accelerators
 * @index: The index of the mbox tuple as specified in order in DT
 *
 * May sleep.
 */
struct rpmh_client *rpmh_get_byindex(struct platform_device *pdev,
				int index)
{
	return get_rpmh_client(pdev, NULL, index);
}
EXPORT_SYMBOL(rpmh_get_byindex);

/**
 * rpmh_release: Release the RPMH client
 *
 * @rc: The RPMh handle to be freed.
 */
void rpmh_release(struct rpmh_client *rc)
{
	if (rc && !IS_ERR_OR_NULL(rc->chan))
		mbox_free_channel(rc->chan);

	kfree(rc);
}
EXPORT_SYMBOL(rpmh_release);
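
/*
 * Illustrative lifecycle sketch (not part of this driver); the probe
 * function, platform device and mbox name are assumptions:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct rpmh_client *rc;
 *
 *		rc = rpmh_get_byname(pdev, "rpmh");
 *		if (IS_ERR(rc))
 *			return PTR_ERR(rc);
 *
 *		(use the rpmh_write*() calls above with rc)
 *
 *		rpmh_release(rc);
 *		return 0;
 *	}
 */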