/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
#include <soc/qcom/cmd-db.h>

#define RPMH_MAX_MBOXES		2
#define RPMH_MAX_FAST_RES	32
#define RPMH_MAX_REQ_IN_BATCH	10

#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name)	\
	struct rpmh_msg name = {			\
		.msg = {				\
			.state = s,			\
			.payload = name.cmd,		\
			.num_payload = 0,		\
			.is_read = false,		\
			.is_control = false,		\
			.is_complete = true,		\
			.invalidate = false,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.wait_count = c,			\
		.rc = rc,				\
		.bit = -1,				\
	}

struct rpmh_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

struct rpmh_msg {
	struct tcs_mbox_msg msg;
	struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
	struct completion *completion;
	atomic_t *wait_count;
	struct rpmh_client *rc;
	int bit;
	int err; /* relay error from mbox for sync calls */
};

struct rpmh_mbox {
	struct device_node *mbox_dn;
	struct list_head resources;
	spinlock_t lock;
	struct rpmh_msg *msg_pool;
	DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
	bool dirty;
	bool in_solver_mode;
};

struct rpmh_client {
	struct device *dev;
	struct mbox_client client;
	struct mbox_chan *chan;
	struct rpmh_mbox *rpmh;
};

static struct rpmh_mbox mbox_ctrlr[RPMH_MAX_MBOXES];
DEFINE_MUTEX(rpmh_mbox_mutex);
bool rpmh_standalone;

static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	struct rpmh_msg *msg = NULL;
	int pos;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	pos = find_first_zero_bit(rpm->fast_req, RPMH_MAX_FAST_RES);
	if (pos != RPMH_MAX_FAST_RES) {
		bitmap_set(rpm->fast_req, pos, 1);
		msg = &rpm->msg_pool[pos];
		memset(msg, 0, sizeof(*msg));
		msg->bit = pos;
		msg->rc = rc;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	return msg;
}

static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
{
	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
	unsigned long flags;

	/* If the message was allocated from the pool, mark it available */
	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
		spin_lock_irqsave(&rpm->lock, flags);
		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
		spin_unlock_irqrestore(&rpm->lock, flags);
	}
}

static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
{
	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);

	atomic_dec(rpm_msg->wait_count);
}

static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
{
	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
	atomic_t *wc = rpm_msg->wait_count;
	struct completion *compl = rpm_msg->completion;

	rpm_msg->err = r;

	if (r) {
		dev_err(rpm_msg->rc->dev,
			"RPMH TX fail in msg addr 0x%x, err=%d\n",
			rpm_msg->msg.payload[0].addr, r);
		/*
		 * If we fail TX for a read call, then we won't get
		 * an rx_callback. Force an rx_cb.
		 */
		if (rpm_msg->msg.is_read)
			rpmh_rx_cb(cl, msg);
	}

	/*
	 * Copy the child object pointers before freeing up the parent.
	 * This way, even if the parent (rpm_msg) object gets reused, we
	 * can free up the child objects (compl/wc) in parallel.
	 * If you free up the children before the parent, then we run
	 * into an issue where the stack-allocated parent object may be
	 * invalid before we can check the ->bit value.
	 */
	free_msg_to_pool(rpm_msg);

	/* Signal the blocking thread we are done */
	if (wc && atomic_dec_and_test(wc))
		if (compl)
			complete(compl);
}

static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
{
	struct rpmh_req *p, *req = NULL;

	list_for_each_entry(p, &rc->rpmh->resources, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc,
			enum rpmh_state state, struct tcs_cmd *cmd)
{
	struct rpmh_req *req;
	struct rpmh_mbox *rpm = rc->rpmh;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	req = __find_req(rc, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	INIT_LIST_HEAD(&req->list);
	list_add_tail(&req->list, &rpm->resources);

existing:
	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_AWAKE_STATE:
		if (req->sleep_val != UINT_MAX)
			req->wake_val = cmd->data;
		break;
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	default:
		break;
	}

unlock:
	rpm->dirty = true;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @rc: The RPMH client
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (payload).
 *
 * Cache the RPMH request and send it if the state is ACTIVE_ONLY or
 * AWAKE. SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
int __rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
			struct rpmh_msg *rpm_msg)
{
	struct rpmh_req *req;
	int ret = 0;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_payload; i++) {
		req = cache_rpm_request(rc, state, &rpm_msg->msg.payload[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	rpm_msg->msg.state = state;

	/* Send to mailbox only if active or awake */
	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		ret = mbox_send_message(rc->chan, &rpm_msg->msg);
		if (ret > 0)
			ret = 0;
	} else {
		/* Clean up our call by spoofing tx_done */
		rpmh_tx_done(&rc->client, &rpm_msg->msg, ret);
	}

	return ret;
}

/**
 * rpmh_write_single_async: Write a single RPMH command
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @addr: The ePCB address
 * @data: The data
 *
 * Write a single value in the fast path. Fire and forget.
 * May be called from atomic contexts.
 */
int rpmh_write_single_async(struct rpmh_client *rc, enum rpmh_state state,
			u32 addr, u32 data)
{
	struct rpmh_msg *rpm_msg;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm_msg = get_msg_from_pool(rc);
	if (!rpm_msg)
		return -ENOMEM;

	rpm_msg->cmd[0].addr = addr;
	rpm_msg->cmd[0].data = data;

	rpm_msg->msg.payload = rpm_msg->cmd;
	rpm_msg->msg.num_payload = 1;

	return __rpmh_write(rc, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_single_async);

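/*
 * Illustrative usage (not part of this driver): a minimal sketch of a
 * fire-and-forget active-only vote from an atomic context. The handle
 * "rc", the driver data "foo", and the address "foo->res_addr" are
 * hypothetical; resource addresses normally come from cmd_db lookups.
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_drvdata *foo = data;
 *
 *		if (rpmh_write_single_async(foo->rc, RPMH_ACTIVE_ONLY_STATE,
 *					    foo->res_addr, 0x1))
 *			pr_err("foo: RPMH vote failed\n");
 *
 *		return IRQ_HANDLED;
 *	}
 */
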
/**
 * rpmh_write_single: Write a single RPMH command and
 * wait for completion of the command.
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @addr: The ePCB address
 * @data: The data
 *
 * Write a single value in the slow path and wait for the request to
 * complete. Blocks until the request is completed on the accelerator.
 * Do not call from atomic contexts.
 */
int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
			u32 addr, u32 data)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(1);
	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
	int ret;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_payload = 1;

	ret = __rpmh_write(rc, state, &rpm_msg);
	if (ret < 0)
		return ret;

	wait_for_completion(&compl);

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write_single);

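/*
 * Illustrative usage (not part of this driver): a minimal sketch of a
 * blocking active vote paired with a buffered sleep value, which
 * rpmh_flush() later sends to the controller. "rc" and "res_addr" are
 * hypothetical placeholders.
 *
 *	ret = rpmh_write_single(rc, RPMH_ACTIVE_ONLY_STATE, res_addr, 0x3);
 *	if (!ret)
 *		ret = rpmh_write_single(rc, RPMH_SLEEP_STATE, res_addr, 0x0);
 */
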
struct rpmh_msg *__get_rpmh_msg_async(struct rpmh_client *rc,
		enum rpmh_state state, struct tcs_cmd *cmd, int n)
{
	struct rpmh_msg *rpm_msg;

	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return ERR_PTR(-EINVAL);

	rpm_msg = get_msg_from_pool(rc);
	if (!rpm_msg)
		return ERR_PTR(-ENOMEM);

	memcpy(rpm_msg->cmd, cmd, n * sizeof(*cmd));

	rpm_msg->msg.state = state;
	rpm_msg->msg.payload = rpm_msg->cmd;
	rpm_msg->msg.num_payload = n;

	return rpm_msg;
}

/**
 * rpmh_write_async: Write a batch of RPMH commands
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in the payload
 *
 * Write a batch of RPMH commands; the order of commands is maintained
 * and they are sent as a single shot. By default the entire set of
 * commands is considered active only (i.e., it will not be cached in
 * the wake set, unless all of them have their corresponding sleep
 * requests).
 */
int rpmh_write_async(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int n)
{
	struct rpmh_msg *rpm_msg;

	if (rpmh_standalone)
		return 0;

	rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n);
	if (IS_ERR(rpm_msg))
		return PTR_ERR(rpm_msg);

	return __rpmh_write(rc, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

/**
 * rpmh_write: Write a batch of RPMH commands
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in the payload
 *
 * Write a batch of RPMH commands; the order of commands is maintained
 * and they are sent as a single shot. By default the entire set of
 * commands is considered active only (i.e., it will not be cached in
 * the wake set, unless all of them have their corresponding sleep
 * requests). All requests are sent as slow-path requests.
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(1);
	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
	int ret;

	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_payload = n;

	ret = __rpmh_write(rc, state, &rpm_msg);
	if (ret)
		return ret;

	wait_for_completion(&compl);

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);

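/*
 * Illustrative usage (not part of this driver): a minimal sketch of an
 * ordered two-command batch sent as a blocking active-only request. The
 * addresses and data values are hypothetical.
 *
 *	struct tcs_cmd cmd[2] = {
 *		{ .addr = 0x30000, .data = 0x1 },
 *		{ .addr = 0x30004, .data = 0x2 },
 *	};
 *
 *	ret = rpmh_write(rc, RPMH_ACTIVE_ONLY_STATE, cmd, 2);
 */
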
/**
 * rpmh_write_passthru: Write multiple batches of RPMH commands without caching
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0-terminated.
 *
 * Write a request to the mailbox controller without caching. If the request
 * state is ACTIVE or AWAKE, then the requests are treated as completion
 * requests and sent to the controller immediately. The function waits until
 * all the commands are complete. If the request was to SLEEP or WAKE_ONLY,
 * then the request is sent as fire-and-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int *n)
{
	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
	int count = 0;
	int ret, i, j, k;
	bool complete_set;
	unsigned long flags;
	struct rpmh_mbox *rpm;

	if (IS_ERR_OR_NULL(rc) || !cmd || !n)
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	/* Do not allow setting wake votes when in solver mode */
	rpm = rc->rpmh;
	spin_lock_irqsave(&rpm->lock, flags);
	if (rpm->in_solver_mode && state == RPMH_WAKE_ONLY_STATE) {
		spin_unlock_irqrestore(&rpm->lock, flags);
		return -EIO;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	while (n[count++])
		;
	count--;
	if (!count || count > RPMH_MAX_REQ_IN_BATCH)
		return -EINVAL;

	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		/*
		 * Ensure the 'complete' bit is set for at least one command
		 * in each set for active/awake requests.
		 */
		for (i = 0, k = 0; i < count; k += n[i], i++) {
			complete_set = false;
			for (j = 0; j < n[i]; j++) {
				if (cmd[k + j].complete) {
					complete_set = true;
					break;
				}
			}
			if (!complete_set) {
				dev_err(rc->dev, "No completion set for batch\n");
				return -EINVAL;
			}
		}
	}

	/* Create async request batches */
	for (i = 0; i < count; i++) {
		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
		if (IS_ERR_OR_NULL(rpm_msg[i])) {
			for (j = 0; j < i; j++)
				free_msg_to_pool(rpm_msg[j]);
			return PTR_ERR(rpm_msg[i]);
		}
		cmd += n[i];
	}

	/* Send if Active or Awake and wait for the whole set to complete */
	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		might_sleep();
		atomic_set(&wait_count, count);
		for (i = 0; i < count; i++) {
			rpm_msg[i]->completion = &compl;
			rpm_msg[i]->wait_count = &wait_count;
			/* Bypass caching and write to mailbox directly */
			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
			if (ret < 0) {
				pr_err("Error(%d) sending RPM message addr=0x%x\n",
					ret, rpm_msg[i]->msg.payload[0].addr);
				break;
			}
		}
		/* For the unsent requests, spoof tx_done */
		for (j = i; j < count; j++)
			rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
		wait_for_completion(&compl);
	} else {
		/* Send Sleep requests to the controller; expect no response */
		for (i = 0; i < count; i++) {
			rpm_msg[i]->completion = NULL;
			ret = mbox_send_controller_data(rc->chan,
						&rpm_msg[i]->msg);
			free_msg_to_pool(rpm_msg[i]);
		}
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(rpmh_write_passthru);

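/*
 * Illustrative usage (not part of this driver): a minimal sketch of two
 * batches of sizes 2 and 1 with the 0-terminated count array the API
 * expects. For active/awake requests, at least one command per batch
 * must have .complete set. All addresses and values are hypothetical.
 *
 *	struct tcs_cmd cmd[3] = {
 *		{ .addr = 0x30000, .data = 0x1 },
 *		{ .addr = 0x30004, .data = 0x2, .complete = true },
 *		{ .addr = 0x30008, .data = 0x3, .complete = true },
 *	};
 *	int n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_passthru(rc, RPMH_ACTIVE_ONLY_STATE, cmd, n);
 */
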
/**
 * rpmh_mode_solver_set: Indicate that the RSC controller hardware has
 * been configured to be in solver mode
 *
 * @rc: The RPMH handle
 * @enable: Boolean value indicating if the controller is in solver mode.
 *
 * When solver mode is enabled, the passthru API will not be able to send
 * wake votes; only awake and active votes are allowed.
 */
int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
{
	struct rpmh_mbox *rpm;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm = rc->rpmh;
	do {
		spin_lock_irqsave(&rpm->lock, flags);
		if (mbox_controller_is_idle(rc->chan)) {
			rpm->in_solver_mode = enable;
			spin_unlock_irqrestore(&rpm->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&rpm->lock, flags);
		udelay(10);
	} while (1);

	return 0;
}
EXPORT_SYMBOL(rpmh_mode_solver_set);

/**
 * rpmh_write_control: Write async control commands to the controller
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @cmd: The payload data
 * @n: The number of elements in the payload
 *
 * Write control commands to the controller. The messages are always
 * sent async.
 *
 * May be called from atomic contexts.
 */
int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);

	if (IS_ERR_OR_NULL(rc) || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_payload = n;
	rpm_msg.msg.is_control = true;
	rpm_msg.msg.is_complete = false;

	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
EXPORT_SYMBOL(rpmh_write_control);

/**
 * rpmh_invalidate: Invalidate all sleep and active sets
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 *
 * Invalidate the sleep and active values in the TCS blocks.
 */
int rpmh_invalidate(struct rpmh_client *rc)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
	struct rpmh_mbox *rpm;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm = rc->rpmh;
	rpm_msg.msg.invalidate = true;
	rpm_msg.msg.is_complete = false;

	spin_lock_irqsave(&rpm->lock, flags);
	rpm->dirty = true;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}
EXPORT_SYMBOL(rpmh_invalidate);

/**
 * rpmh_read: Read a resource value
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 * @addr: The ePCB address
 * @resp: The store for the response received from RPMH
 *
 * Read a resource value from RPMH.
 */
int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
{
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
	DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
				&compl, &wait_count, rpm_msg);

	if (IS_ERR_OR_NULL(rc) || !resp)
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = 0;
	rpm_msg.msg.num_payload = 1;

	rpm_msg.msg.is_read = true;

	ret = mbox_send_message(rc->chan, &rpm_msg.msg);
	if (ret < 0)
		return ret;

	/* Wait until the response is received from RPMH */
	wait_for_completion(&compl);

	/* Read the data back from the tcs_mbox_msg structure */
	*resp = rpm_msg.cmd[0].data;

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_read);

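/*
 * Illustrative usage (not part of this driver): a minimal sketch of a
 * blocking read of a resource value; the call waits for both tx_done
 * and the rx callback. "rc" and "res_addr" are hypothetical.
 *
 *	u32 val;
 *
 *	ret = rpmh_read(rc, res_addr, &val);
 *	if (!ret)
 *		pr_debug("resource value: 0x%x\n", val);
 */
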
static inline int is_req_valid(struct rpmh_req *req)
{
	return (req->sleep_val != UINT_MAX && req->wake_val != UINT_MAX
			&& req->sleep_val != req->wake_val);
}

int send_single(struct rpmh_client *rc, enum rpmh_state state, u32 addr,
			u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, state, NULL, NULL, rpm_msg);

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_payload = 1;
	rpm_msg.msg.is_complete = false;

	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
 *
 * @rc: The RPMh handle obtained from rpmh_get_byname() or rpmh_get_byindex()
 *
 * This function is generally called from the sleep code of the last CPU
 * that is powering down the entire system.
 *
 * Returns -EBUSY if the controller is busy, probably waiting on a response
 * to an RPMH request sent earlier.
 */
int rpmh_flush(struct rpmh_client *rc)
{
	struct rpmh_req *p;
	struct rpmh_mbox *rpm;
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	if (!mbox_controller_is_idle(rc->chan))
		return -EBUSY;

	rpm = rc->rpmh;
	spin_lock_irqsave(&rpm->lock, flags);
	if (!rpm->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		spin_unlock_irqrestore(&rpm->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	/*
	 * Nobody else should be calling this function other than the sleep
	 * code, hence we can run without locks.
	 */
	list_for_each_entry(p, &rc->rpmh->resources, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:0x%x s:0x%x w:0x%x\n",
				__func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(rc, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(rc, RPMH_WAKE_ONLY_STATE, p->addr,
						p->wake_val);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&rpm->lock, flags);
	rpm->dirty = false;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);

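/*
 * Illustrative usage (not part of this driver): a minimal sketch of the
 * platform sleep path flushing buffered sleep/wake votes from the last
 * CPU going down. The calling context is hypothetical.
 *
 *	ret = rpmh_flush(rc);
 *	if (ret == -EBUSY)
 *		return ret;
 *
 * A -EBUSY return aborts low-power mode entry; the caller retries later.
 */
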
/**
 * get_mbox: Get the MBOX controller
 * @pdev: the platform device
 * @name: the MBOX name as specified in DT for the device.
 * @index: the index in the mboxes property if name is not provided.
 *
 * Get the MBOX device node. We will use that to know which
 * MBOX controller this platform device is intending to talk
 * to.
 */
static struct rpmh_mbox *get_mbox(struct platform_device *pdev,
			const char *name, int index)
{
	int i;
	struct property *prop;
	struct of_phandle_args spec;
	const char *mbox_name;
	struct rpmh_mbox *rpmh;

	if (index < 0) {
		if (!name || !name[0])
			return ERR_PTR(-EINVAL);
		index = 0;
		of_property_for_each_string(pdev->dev.of_node,
				"mbox-names", prop, mbox_name) {
			if (!strcmp(name, mbox_name))
				break;
			index++;
		}
	}

	if (of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
					"#mbox-cells", index, &spec)) {
		dev_dbg(&pdev->dev, "%s: can't parse mboxes property\n",
			__func__);
		return ERR_PTR(-ENODEV);
	}

	for (i = 0; i < RPMH_MAX_MBOXES; i++)
		if (mbox_ctrlr[i].mbox_dn == spec.np) {
			rpmh = &mbox_ctrlr[i];
			goto found;
		}

	/* A new MBOX */
	for (i = 0; i < RPMH_MAX_MBOXES; i++)
		if (!mbox_ctrlr[i].mbox_dn)
			break;

	/* More controllers than expected - not recoverable */
	if (WARN_ON(i == RPMH_MAX_MBOXES)) {
		of_node_put(spec.np);
		return ERR_PTR(-EINVAL);
	}

	rpmh = &mbox_ctrlr[i];

	rpmh->msg_pool = kcalloc(RPMH_MAX_FAST_RES, sizeof(struct rpmh_msg),
				GFP_KERNEL);
	if (!rpmh->msg_pool) {
		of_node_put(spec.np);
		return ERR_PTR(-ENOMEM);
	}

	rpmh->mbox_dn = spec.np;
	INIT_LIST_HEAD(&rpmh->resources);
	spin_lock_init(&rpmh->lock);

found:
	of_node_put(spec.np);

	return rpmh;
}

static struct rpmh_client *get_rpmh_client(struct platform_device *pdev,
				const char *name, int index)
{
	struct rpmh_client *rc;
	int ret = 0;

	ret = cmd_db_ready();
	if (ret)
		return ERR_PTR(ret);

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->client.rx_callback = rpmh_rx_cb;
	rc->client.tx_prepare = NULL;
	rc->client.tx_done = rpmh_tx_done;
	rc->client.tx_block = false;
	rc->client.knows_txdone = false;
	rc->client.dev = &pdev->dev;
	rc->dev = &pdev->dev;

	rc->chan = ERR_PTR(-EINVAL);

	/* Initialize by index or name, whichever is present */
	if (index >= 0)
		rc->chan = mbox_request_channel(&rc->client, index);
	else if (name)
		rc->chan = mbox_request_channel_byname(&rc->client, name);

	if (IS_ERR_OR_NULL(rc->chan)) {
		ret = PTR_ERR(rc->chan);
		goto cleanup;
	}

	mutex_lock(&rpmh_mbox_mutex);
	rc->rpmh = get_mbox(pdev, name, index);
	rpmh_standalone = (cmd_db_is_standalone() > 0);
	mutex_unlock(&rpmh_mbox_mutex);

	if (IS_ERR(rc->rpmh)) {
		ret = PTR_ERR(rc->rpmh);
		mbox_free_channel(rc->chan);
		goto cleanup;
	}

	return rc;

cleanup:
	kfree(rc);
	return ERR_PTR(ret);
}

/**
 * rpmh_get_byname: Get the RPMh handle by mbox name
 *
 * @pdev: the platform device which needs to communicate with RPM
 * accelerators
 * @name: The mbox-name assigned to the client's mailbox handle
 *
 * May sleep.
 */
struct rpmh_client *rpmh_get_byname(struct platform_device *pdev,
				const char *name)
{
	return get_rpmh_client(pdev, name, -1);
}
EXPORT_SYMBOL(rpmh_get_byname);

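/*
 * Illustrative usage (not part of this driver): a minimal sketch of a
 * client acquiring its RPMH handle at probe time by mailbox name. The
 * driver and channel names are hypothetical; the device's DT node would
 * carry matching "mboxes" and "mbox-names" properties.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct rpmh_client *rc;
 *
 *		rc = rpmh_get_byname(pdev, "foo-rpmh");
 *		if (IS_ERR(rc))
 *			return PTR_ERR(rc);
 *
 *		... use rc, then rpmh_release(rc) on driver removal ...
 *	}
 */
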
/**
 * rpmh_get_byindex: Get the RPMh handle by mbox index
 *
 * @pdev: the platform device which needs to communicate with RPM
 * accelerators
 * @index: The index of the mbox tuple as specified in order in DT
 *
 * May sleep.
 */
struct rpmh_client *rpmh_get_byindex(struct platform_device *pdev,
				int index)
{
	return get_rpmh_client(pdev, NULL, index);
}
EXPORT_SYMBOL(rpmh_get_byindex);

/**
 * rpmh_release: Release the RPMH client
 *
 * @rc: The RPMh handle to be freed.
 */
void rpmh_release(struct rpmh_client *rc)
{
	if (rc && !IS_ERR_OR_NULL(rc->chan))
		mbox_free_channel(rc->chan);

	kfree(rc);
}
EXPORT_SYMBOL(rpmh_release);
954EXPORT_SYMBOL(rpmh_release);