/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
#include <soc/qcom/cmd-db.h>

#define RPMH_MAX_MBOXES		2
#define RPMH_MAX_FAST_RES	32
#define RPMH_MAX_REQ_IN_BATCH	10
#define RPMH_TIMEOUT		msecs_to_jiffies(10000)

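/*
 * Descriptive note (added for clarity): DEFINE_RPMH_MSG_ONSTACK builds a
 * complete rpmh_msg on the caller's stack for the synchronous slow-path
 * APIs below. The TCS payload points at the embedded cmd[] array,
 * .bit = -1 marks the message as not belonging to the fast-path msg_pool
 * (see __free_msg_to_pool()), and the completion (q) and wait count (c)
 * are what rpmh_tx_done() uses to wake the blocked caller.
 */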
#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name)	\
	struct rpmh_msg name = {			\
		.msg = {				\
			.state = s,			\
			.payload = name.cmd,		\
			.num_payload = 0,		\
			.is_read = false,		\
			.is_control = false,		\
			.is_complete = true,		\
			.invalidate = false,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.wait_count = c,			\
		.rc = rc,				\
		.bit = -1,				\
	}

struct rpmh_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

struct rpmh_msg {
	struct tcs_mbox_msg msg;
	struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
	struct completion *completion;
	atomic_t *wait_count;
	struct rpmh_client *rc;
	int bit;
	int err; /* relay error from mbox for sync calls */
};

struct rpmh_mbox {
	struct device_node *mbox_dn;
	struct list_head resources;
	spinlock_t lock;
	struct rpmh_msg *msg_pool;
	DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
	bool dirty;
	bool in_solver_mode;
	/* Cache sleep and wake requests sent as passthru */
	struct rpmh_msg *passthru_cache[2 * RPMH_MAX_REQ_IN_BATCH];
};

struct rpmh_client {
	struct device *dev;
	struct mbox_client client;
	struct mbox_chan *chan;
	struct rpmh_mbox *rpmh;
};

static struct rpmh_mbox mbox_ctrlr[RPMH_MAX_MBOXES];
DEFINE_MUTEX(rpmh_mbox_mutex);
bool rpmh_standalone;

static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	struct rpmh_msg *msg = NULL;
	int pos;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	pos = find_first_zero_bit(rpm->fast_req, RPMH_MAX_FAST_RES);
	if (pos != RPMH_MAX_FAST_RES) {
		bitmap_set(rpm->fast_req, pos, 1);
		msg = &rpm->msg_pool[pos];
		memset(msg, 0, sizeof(*msg));
		msg->bit = pos;
		msg->rc = rc;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	return msg;
}

static void __free_msg_to_pool(struct rpmh_msg *rpm_msg)
{
	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;

	/* If the message was allocated from the pool, mark it available */
	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES)
		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
}

static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
{
	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	__free_msg_to_pool(rpm_msg);
	spin_unlock_irqrestore(&rpm->lock, flags);
}

static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
{
	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);

	atomic_dec(rpm_msg->wait_count);
}

static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
{
	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
	atomic_t *wc = rpm_msg->wait_count;
	struct completion *compl = rpm_msg->completion;

	rpm_msg->err = r;

	if (r) {
		dev_err(rpm_msg->rc->dev,
			"RPMH TX fail in msg addr 0x%x, err=%d\n",
			rpm_msg->msg.payload[0].addr, r);
		/*
		 * If TX fails for a read request, then we won't get
		 * an rx_callback. Force the rx_cb here.
		 */
		if (rpm_msg->msg.is_read)
			rpmh_rx_cb(cl, msg);
	}

	/*
	 * Copy the child object pointers before freeing up the parent.
	 * This way, even if the parent (rpm_msg) object gets reused, we
	 * can free the child objects (completion/wait_count) in parallel.
	 * If the children were freed before the parent, the stack-allocated
	 * parent object could become invalid before its ->bit value is
	 * checked.
	 */
	free_msg_to_pool(rpm_msg);

	/* Signal the blocking thread we are done */
	if (wc && atomic_dec_and_test(wc))
		if (compl)
			complete(compl);
}

/**
 * wait_for_tx_done: Wait until the response is received, complaining
 * periodically while it is late and triggering a BUG() if the AOSS
 * never responds.
 *
 * @rc: The RPMH client
 * @compl: The completion object
 * @addr: An address that was sent in the request
 * @data: The data for that address in the request
 */
static inline void wait_for_tx_done(struct rpmh_client *rc,
		struct completion *compl, u32 addr, u32 data)
{
	int ret;
	int count = 4;
	int skip = 0;

	do {
		ret = wait_for_completion_timeout(compl, RPMH_TIMEOUT);
		if (ret) {
			if (count != 4)
				dev_notice(rc->dev,
					"RPMH response received addr=0x%x data=0x%x\n",
					addr, data);
			return;
		}
		if (!count) {
			if (skip++ % 100)
				continue;
			dev_err(rc->dev,
				"RPMH waiting for interrupt from AOSS\n");
			mbox_chan_debug(rc->chan);
			BUG();
		} else {
			dev_err(rc->dev,
				"RPMH response timeout (%d) addr=0x%x,data=0x%x\n",
				count, addr, data);
			count--;
		}
	} while (true);
}

static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
{
	struct rpmh_req *p, *req = NULL;

	list_for_each_entry(p, &rc->rpmh->resources, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc,
			enum rpmh_state state, struct tcs_cmd *cmd)
{
	struct rpmh_req *req;
	struct rpmh_mbox *rpm = rc->rpmh;
	unsigned long flags;

	spin_lock_irqsave(&rpm->lock, flags);
	req = __find_req(rc, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	INIT_LIST_HEAD(&req->list);
	list_add_tail(&req->list, &rpm->resources);

existing:
	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_AWAKE_STATE:
		if (req->sleep_val != UINT_MAX)
			req->wake_val = cmd->data;
		break;
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	default:
		break;
	}

unlock:
	rpm->dirty = true;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return req;
}

static int check_ctrlr_state(struct rpmh_client *rc, enum rpmh_state state)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	unsigned long flags;
	int ret = 0;

	/* Do not allow setting active votes when in solver mode */
	spin_lock_irqsave(&rpm->lock, flags);
	if (rpm->in_solver_mode && state == RPMH_AWAKE_STATE)
		ret = -EBUSY;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return ret;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @rc: The RPMH client
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (payload).
 *
 * Cache the RPMH request and send it to the controller if the state
 * is ACTIVE_ONLY or AWAKE. SLEEP/WAKE_ONLY requests are not sent to
 * the controller at this time. Use rpmh_flush() to send them.
 */
int __rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
		struct rpmh_msg *rpm_msg)
{
	struct rpmh_req *req;
	int ret = 0;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_payload; i++) {
		req = cache_rpm_request(rc, state, &rpm_msg->msg.payload[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	rpm_msg->msg.state = state;

	/* Send to mailbox only if active or awake */
	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		ret = mbox_send_message(rc->chan, &rpm_msg->msg);
		if (ret > 0)
			ret = 0;
	} else {
		/* Clean up our call by spoofing tx_done */
		rpmh_tx_done(&rc->client, &rpm_msg->msg, ret);
	}

	return ret;
}

/**
 * rpmh_write_single_async: Write a single RPMH command
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @state: Active/sleep set
 * @addr: The ePCB address
 * @data: The data
 *
 * Write a single value in fast-path. Fire and forget.
 * May be called from atomic contexts.
 */
int rpmh_write_single_async(struct rpmh_client *rc, enum rpmh_state state,
			u32 addr, u32 data)
{
	struct rpmh_msg *rpm_msg;
	int ret;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	ret = check_ctrlr_state(rc, state);
	if (ret)
		return ret;

	rpm_msg = get_msg_from_pool(rc);
	if (!rpm_msg)
		return -ENOMEM;

	rpm_msg->cmd[0].addr = addr;
	rpm_msg->cmd[0].data = data;

	rpm_msg->msg.payload = rpm_msg->cmd;
	rpm_msg->msg.num_payload = 1;

	return __rpmh_write(rc, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_single_async);

/**
 * rpmh_write_single: Write a single RPMH command and
 * wait for completion of the command.
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @state: Active/sleep set
 * @addr: The ePCB address
 * @data: The data
 *
 * Write a single value in slow-path and wait for the request to be
 * complete. Blocks until the request is completed on the accelerator.
 * Do not call from atomic contexts.
 */
int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
			u32 addr, u32 data)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(1);
	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
	int ret;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	ret = check_ctrlr_state(rc, state);
	if (ret)
		return ret;

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_payload = 1;

	ret = __rpmh_write(rc, state, &rpm_msg);
	if (ret < 0)
		return ret;

	wait_for_tx_done(rc, &compl, addr, data);

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write_single);

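/*
 * Illustrative usage sketch (not part of the driver): a client holding an
 * rpmh_client handle could set an active vote and the matching sleep vote
 * for one resource. The addr and data values here are hypothetical; real
 * ePCB addresses come from the command DB.
 *
 *	ret = rpmh_write_single(rc, RPMH_ACTIVE_ONLY_STATE, addr, 0x1);
 *	if (!ret)
 *		ret = rpmh_write_single_async(rc, RPMH_SLEEP_STATE, addr, 0x0);
 */
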
struct rpmh_msg *__get_rpmh_msg_async(struct rpmh_client *rc,
		enum rpmh_state state, struct tcs_cmd *cmd, int n)
{
	struct rpmh_msg *rpm_msg;

	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return ERR_PTR(-EINVAL);

	rpm_msg = get_msg_from_pool(rc);
	if (!rpm_msg)
		return ERR_PTR(-ENOMEM);

	memcpy(rpm_msg->cmd, cmd, n * sizeof(*cmd));

	rpm_msg->msg.state = state;
	rpm_msg->msg.payload = rpm_msg->cmd;
	rpm_msg->msg.num_payload = n;

	return rpm_msg;
}

/**
 * rpmh_write_async: Write a batch of RPMH commands
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a batch of RPMH commands; the order of commands is maintained
 * and they are sent as a single shot. By default the entire set of
 * commands is considered active only (i.e., not cached in the wake set,
 * unless all of them have their corresponding sleep requests).
 */
int rpmh_write_async(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int n)
{
	struct rpmh_msg *rpm_msg;
	int ret;

	if (rpmh_standalone)
		return 0;

	ret = check_ctrlr_state(rc, state);
	if (ret)
		return ret;

	rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n);
	if (IS_ERR(rpm_msg))
		return PTR_ERR(rpm_msg);

	return __rpmh_write(rc, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

/**
 * rpmh_write: Write a batch of RPMH commands
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a batch of RPMH commands; the order of commands is maintained
 * and they are sent as a single shot. By default the entire set of
 * commands is considered active only (i.e., not cached in the wake set,
 * unless all of them have their corresponding sleep requests). All
 * requests are sent as slow path requests.
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(1);
	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
	int ret;

	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	ret = check_ctrlr_state(rc, state);
	if (ret)
		return ret;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_payload = n;

	ret = __rpmh_write(rc, state, &rpm_msg);
	if (ret)
		return ret;

	wait_for_tx_done(rc, &compl, cmd[0].addr, cmd[0].data);

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_write);

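/*
 * Illustrative usage sketch (not part of the driver): sending several
 * commands in one message with rpmh_write(). The addr_a/addr_b addresses
 * and data values are hypothetical placeholders.
 *
 *	struct tcs_cmd cmd[2] = {
 *		{ .addr = addr_a, .data = 0x60, .complete = true },
 *		{ .addr = addr_b, .data = 0x01 },
 *	};
 *
 *	ret = rpmh_write(rc, RPMH_ACTIVE_ONLY_STATE, cmd, 2);
 */
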
static int cache_passthru(struct rpmh_client *rc, struct rpmh_msg **rpm_msg,
			int count)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	unsigned long flags;
	int ret = 0;
	int index = 0;
	int i;

	spin_lock_irqsave(&rpm->lock, flags);
	while (rpm->passthru_cache[index])
		index++;
	if (index + count >= 2 * RPMH_MAX_REQ_IN_BATCH) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < count; i++)
		rpm->passthru_cache[index + i] = rpm_msg[i];
fail:
	spin_unlock_irqrestore(&rpm->lock, flags);

	return ret;
}

static int flush_passthru(struct rpmh_client *rc)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	struct rpmh_msg *rpm_msg;
	unsigned long flags;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	spin_lock_irqsave(&rpm->lock, flags);
	for (i = 0; rpm->passthru_cache[i]; i++) {
		rpm_msg = rpm->passthru_cache[i];
		ret = mbox_write_controller_data(rc->chan, &rpm_msg->msg);
		if (ret)
			goto fail;
	}
fail:
	spin_unlock_irqrestore(&rpm->lock, flags);

	return ret;
}

static void invalidate_passthru(struct rpmh_client *rc)
{
	struct rpmh_mbox *rpm = rc->rpmh;
	unsigned long flags;
	int index = 0;
	int i;

	spin_lock_irqsave(&rpm->lock, flags);
	while (rpm->passthru_cache[index])
		index++;
	for (i = 0; i < index; i++) {
		__free_msg_to_pool(rpm->passthru_cache[i]);
		rpm->passthru_cache[i] = NULL;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for
 * the batch to finish.
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the mailbox controller without caching. If the
 * request state is ACTIVE or AWAKE, then the requests are treated as
 * completion requests and sent to the controller immediately. The
 * function waits until all the commands are complete. If the request
 * was to SLEEP or WAKE_ONLY, then the request is sent as fire-n-forget
 * and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(struct rpmh_client *rc, enum rpmh_state state,
			struct tcs_cmd *cmd, int *n)
{
	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH] = { NULL };
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
	int count = 0;
	int ret, i, j, k;
	bool complete_set;
	u32 addr, data;

	if (IS_ERR_OR_NULL(rc) || !cmd || !n)
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	ret = check_ctrlr_state(rc, state);
	if (ret)
		return ret;

	while (n[count++] > 0)
		;
	count--;
	if (!count || count > RPMH_MAX_REQ_IN_BATCH)
		return -EINVAL;

	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		/*
		 * Ensure the 'complete' bit is set for at least one command
		 * in each set for active/awake requests.
		 */
		for (i = 0, k = 0; i < count; k += n[i], i++) {
			complete_set = false;
			for (j = 0; j < n[i]; j++) {
				if (cmd[k + j].complete) {
					complete_set = true;
					break;
				}
			}
			if (!complete_set) {
				dev_err(rc->dev, "No completion set for batch\n");
				return -EINVAL;
			}
		}
	}

	addr = cmd[0].addr;
	data = cmd[0].data;
	/* Create async request batches */
	for (i = 0; i < count; i++) {
		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
		if (IS_ERR_OR_NULL(rpm_msg[i])) {
			for (j = 0; j < i; j++)
				free_msg_to_pool(rpm_msg[j]);
			return PTR_ERR(rpm_msg[i]);
		}
		cmd += n[i];
	}

	/* Send if Active or Awake and wait for the whole set to complete */
	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
		might_sleep();
		atomic_set(&wait_count, count);
		for (i = 0; i < count; i++) {
			rpm_msg[i]->completion = &compl;
			rpm_msg[i]->wait_count = &wait_count;
			/* Bypass caching and write to mailbox directly */
			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
			if (ret < 0) {
				pr_err("Error(%d) sending RPM message addr=0x%x\n",
					ret, rpm_msg[i]->msg.payload[0].addr);
				break;
			}
		}
		/* For those unsent requests, spoof tx_done */
		for (j = i; j < count; j++)
			rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
		wait_for_tx_done(rc, &compl, addr, data);
	} else {
		/*
		 * Cache sleep/wake data in store.
		 * But flush passthru first before flushing all other data.
		 */
		return cache_passthru(rc, rpm_msg, count);
	}

	return 0;
}
EXPORT_SYMBOL(rpmh_write_batch);

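/*
 * Illustrative usage sketch (not part of the driver): a batched write of
 * two sets, one of two commands and one of a single command. The n[]
 * array is zero-terminated; addresses and data are hypothetical.
 *
 *	struct tcs_cmd cmd[3] = {
 *		{ .addr = addr_a, .data = 0x40, .complete = true },
 *		{ .addr = addr_b, .data = 0x01, .complete = true },
 *		{ .addr = addr_c, .data = 0x00, .complete = true },
 *	};
 *	int n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_batch(rc, RPMH_ACTIVE_ONLY_STATE, cmd, n);
 */
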
/**
 * rpmh_mode_solver_set: Indicate that the RSC controller hardware has
 * been configured to be in solver mode
 *
 * @rc: The RPMH handle
 * @enable: Boolean value indicating if the controller is in solver mode.
 *
 * When solver mode is enabled, the passthru API will not be able to
 * send wake votes, just awake and active votes.
 */
int rpmh_mode_solver_set(struct rpmh_client *rc, bool enable)
{
	struct rpmh_mbox *rpm;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	rpm = rc->rpmh;
	do {
		spin_lock_irqsave(&rpm->lock, flags);
		if (mbox_controller_is_idle(rc->chan)) {
			rpm->in_solver_mode = enable;
			spin_unlock_irqrestore(&rpm->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&rpm->lock, flags);
		udelay(10);
	} while (1);

	return 0;
}
EXPORT_SYMBOL(rpmh_mode_solver_set);

/**
 * rpmh_write_control: Write async control commands to the controller
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write control commands to the controller. The messages are always
 * sent async.
 *
 * May be called from atomic contexts.
 */
int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);

	if (IS_ERR_OR_NULL(rc) || n <= 0 || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_payload = n;
	rpm_msg.msg.is_control = true;
	rpm_msg.msg.is_complete = false;

	return mbox_write_controller_data(rc->chan, &rpm_msg.msg);
}
EXPORT_SYMBOL(rpmh_write_control);

/**
 * rpmh_invalidate: Invalidate all sleep and active sets
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 *
 * Invalidate the sleep and active values in the TCS blocks and mark
 * the cached requests as dirty.
 */
int rpmh_invalidate(struct rpmh_client *rc)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
	struct rpmh_mbox *rpm;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	invalidate_passthru(rc);

	rpm = rc->rpmh;
	rpm_msg.msg.invalidate = true;
	rpm_msg.msg.is_complete = false;

	spin_lock_irqsave(&rpm->lock, flags);
	rpm->dirty = true;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return mbox_write_controller_data(rc->chan, &rpm_msg.msg);
}
EXPORT_SYMBOL(rpmh_invalidate);

/**
 * rpmh_read: Read a resource value
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 * @addr: The ePCB address
 * @resp: The store for the response received from RPMH
 *
 * Read a resource value from RPMH.
 */
int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
{
	int ret;
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
	DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
				&compl, &wait_count, rpm_msg);

	if (IS_ERR_OR_NULL(rc) || !resp)
		return -EINVAL;

	might_sleep();

	if (rpmh_standalone)
		return 0;

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = 0;
	rpm_msg.msg.num_payload = 1;

	rpm_msg.msg.is_read = true;

	ret = mbox_send_message(rc->chan, &rpm_msg.msg);
	if (ret < 0)
		return ret;

	/* Wait until the response is received from RPMH */
	wait_for_tx_done(rc, &compl, addr, 0);

	/* Read the data back from the tcs_mbox_msg structure */
	*resp = rpm_msg.cmd[0].data;

	return rpm_msg.err;
}
EXPORT_SYMBOL(rpmh_read);

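/*
 * Illustrative usage sketch (not part of the driver): reading back the
 * current value of a resource. The addr value is a hypothetical
 * placeholder for an ePCB address from the command DB.
 *
 *	u32 val;
 *
 *	ret = rpmh_read(rc, addr, &val);
 *	if (!ret)
 *		pr_debug("resource value: 0x%x\n", val);
 */
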
/**
 * rpmh_ctrlr_idle: Check if the controller is idle
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 *
 * Returns 0 if the controller is idle, -EBUSY otherwise.
 */
int rpmh_ctrlr_idle(struct rpmh_client *rc)
{
	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	if (!mbox_controller_is_idle(rc->chan))
		return -EBUSY;

	return 0;
}
EXPORT_SYMBOL(rpmh_ctrlr_idle);

static inline int is_req_valid(struct rpmh_req *req)
{
	return (req->sleep_val != UINT_MAX && req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

int send_single(struct rpmh_client *rc, enum rpmh_state state, u32 addr,
			u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(rc, state, NULL, NULL, rpm_msg);

	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_payload = 1;
	/* Sleep/wake sets are sent fire-n-forget, no completion expected */
	rpm_msg.msg.is_complete = false;

	return mbox_write_controller_data(rc->chan, &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
 *
 * @rc: The RPMH handle obtained from rpmh_get_byname() or
 *	rpmh_get_byindex()
 *
 * This function is generally called from the sleep code from the last
 * CPU that is powering down the entire system.
 *
 * Returns -EBUSY if the controller is busy, probably waiting on a
 * response to an RPMH request sent earlier.
 */
int rpmh_flush(struct rpmh_client *rc)
{
	struct rpmh_req *p;
	struct rpmh_mbox *rpm = rc->rpmh;
	int ret;
	unsigned long flags;

	if (IS_ERR_OR_NULL(rc))
		return -EINVAL;

	if (rpmh_standalone)
		return 0;

	if (!mbox_controller_is_idle(rc->chan))
		return -EBUSY;

	spin_lock_irqsave(&rpm->lock, flags);
	if (!rpm->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		spin_unlock_irqrestore(&rpm->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&rpm->lock, flags);

	/* First flush the cached passthru's */
	ret = flush_passthru(rc);
	if (ret)
		return ret;

	/*
	 * Nobody else should be calling this function other than sleep,
	 * hence we can run without locks.
	 */
	list_for_each_entry(p, &rc->rpmh->resources, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:0x%x s:0x%x w:0x%x",
				__func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(rc, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(rc, RPMH_WAKE_ONLY_STATE, p->addr,
				p->wake_val);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&rpm->lock, flags);
	rpm->dirty = false;
	spin_unlock_irqrestore(&rpm->lock, flags);

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);

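/*
 * Illustrative usage sketch (not part of the driver): how platform sleep
 * code might flush the cached sleep/wake votes on the way into system
 * sleep. The surrounding sleep driver and its error handling are
 * hypothetical.
 *
 *	if (rpmh_ctrlr_idle(rc))
 *		return -EBUSY;	// controller busy, abort low power mode
 *
 *	ret = rpmh_flush(rc);
 *	if (ret)
 *		return ret;	// retry system sleep later
 */
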
/**
 * get_mbox: Get the MBOX controller
 * @pdev: the platform device
 * @name: the MBOX name as specified in DT for the device.
 * @index: the index in the mboxes property if name is not provided.
 *
 * Get the MBOX Device node. We will use that to know which
 * MBOX controller this platform device is intending to talk
 * to.
 */
static struct rpmh_mbox *get_mbox(struct platform_device *pdev,
			const char *name, int index)
{
	int i;
	struct property *prop;
	struct of_phandle_args spec;
	const char *mbox_name;
	struct rpmh_mbox *rpmh;

	if (index < 0) {
		if (!name || !name[0])
			return ERR_PTR(-EINVAL);
		index = 0;
		of_property_for_each_string(pdev->dev.of_node,
				"mbox-names", prop, mbox_name) {
			if (!strcmp(name, mbox_name))
				break;
			index++;
		}
	}

	if (of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
					"#mbox-cells", index, &spec)) {
		dev_dbg(&pdev->dev, "%s: can't parse mboxes property\n",
			__func__);
		return ERR_PTR(-ENODEV);
	}

	for (i = 0; i < RPMH_MAX_MBOXES; i++)
		if (mbox_ctrlr[i].mbox_dn == spec.np) {
			rpmh = &mbox_ctrlr[i];
			goto found;
		}

	/* A new MBOX */
	for (i = 0; i < RPMH_MAX_MBOXES; i++)
		if (!mbox_ctrlr[i].mbox_dn)
			break;

	/* More controllers than expected - not recoverable */
	WARN_ON(i == RPMH_MAX_MBOXES);

	rpmh = &mbox_ctrlr[i];

	rpmh->msg_pool = kzalloc(sizeof(struct rpmh_msg) *
				RPMH_MAX_FAST_RES, GFP_KERNEL);
	if (!rpmh->msg_pool) {
		of_node_put(spec.np);
		return ERR_PTR(-ENOMEM);
	}

	rpmh->mbox_dn = spec.np;
	INIT_LIST_HEAD(&rpmh->resources);
	spin_lock_init(&rpmh->lock);

found:
	of_node_put(spec.np);

	return rpmh;
}

static struct rpmh_client *get_rpmh_client(struct platform_device *pdev,
			const char *name, int index)
{
	struct rpmh_client *rc;
	int ret = 0;

	ret = cmd_db_ready();
	if (ret)
		return ERR_PTR(ret);

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->client.rx_callback = rpmh_rx_cb;
	rc->client.tx_prepare = NULL;
	rc->client.tx_done = rpmh_tx_done;
	rc->client.tx_block = false;
	rc->client.knows_txdone = false;
	rc->client.dev = &pdev->dev;
	rc->dev = &pdev->dev;

	rc->chan = ERR_PTR(-EINVAL);

	/* Initialize by index or name, whichever is present */
	if (index >= 0)
		rc->chan = mbox_request_channel(&rc->client, index);
	else if (name)
		rc->chan = mbox_request_channel_byname(&rc->client, name);

	if (IS_ERR_OR_NULL(rc->chan)) {
		ret = PTR_ERR(rc->chan);
		goto cleanup;
	}

	mutex_lock(&rpmh_mbox_mutex);
	rc->rpmh = get_mbox(pdev, name, index);
	rpmh_standalone = (cmd_db_is_standalone() > 0);
	mutex_unlock(&rpmh_mbox_mutex);

	if (IS_ERR(rc->rpmh)) {
		ret = PTR_ERR(rc->rpmh);
		mbox_free_channel(rc->chan);
		goto cleanup;
	}

	return rc;

cleanup:
	kfree(rc);
	return ERR_PTR(ret);
}

/**
 * rpmh_get_byname: Get the RPMH handle by mbox name
 *
 * @pdev: the platform device which needs to communicate with RPM
 * accelerators
 * @name: The mbox-name assigned to the client's mailbox handle
 *
 * May sleep.
 */
struct rpmh_client *rpmh_get_byname(struct platform_device *pdev,
			const char *name)
{
	return get_rpmh_client(pdev, name, -1);
}
EXPORT_SYMBOL(rpmh_get_byname);

/**
 * rpmh_get_byindex: Get the RPMH handle by mbox index
 *
 * @pdev: the platform device which needs to communicate with RPM
 * accelerators
 * @index: The index of the mbox tuple as specified in order in DT
 *
 * May sleep.
 */
struct rpmh_client *rpmh_get_byindex(struct platform_device *pdev,
			int index)
{
	return get_rpmh_client(pdev, NULL, index);
}
EXPORT_SYMBOL(rpmh_get_byindex);

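/*
 * Illustrative usage sketch (not part of the driver): acquiring and
 * releasing a handle from a client driver's probe path. The "rpmh" mbox
 * name is a hypothetical DT binding for the client device.
 *
 *	struct rpmh_client *rc;
 *
 *	rc = rpmh_get_byname(pdev, "rpmh");
 *	if (IS_ERR(rc))
 *		return PTR_ERR(rc);
 *	...
 *	rpmh_release(rc);
 */
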
/**
 * rpmh_release: Release the RPMH client
 *
 * @rc: The RPMH handle to be freed.
 */
void rpmh_release(struct rpmh_client *rc)
{
	if (rc && !IS_ERR_OR_NULL(rc->chan))
		mbox_free_channel(rc->chan);

	kfree(rc);
}
EXPORT_SYMBOL(rpmh_release);