blob: b84ade9b0cb757f2629c10e88be297e0a6a11728 [file] [log] [blame]
Praveen Chidambaram51bb7962013-01-09 13:42:33 -07001/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/spinlock.h>
David Collinsc26c6522012-07-03 16:04:37 -070027#include <linux/string.h>
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -060028#include <linux/device.h>
29#include <linux/notifier.h>
30#include <linux/slab.h>
31#include <linux/workqueue.h>
32#include <linux/platform_device.h>
33#include <linux/of.h>
34#include <linux/of_platform.h>
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -060035#include <linux/rbtree.h>
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -060036#include <mach/socinfo.h>
37#include <mach/msm_smd.h>
38#include <mach/rpm-smd.h>
39#include "rpm-notifier.h"
Mahesh Sivasubramaniand69058b2012-10-01 16:49:26 -060040#define CREATE_TRACE_POINTS
41#include "trace_rpm_smd.h"
David Collinsc26c6522012-07-03 16:04:37 -070042/* Debug Definitions */
43
/* Bit flags selecting how outgoing RPM requests are logged. */
enum {
	MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),	/* decoded, human-readable form */
	MSM_RPM_LOG_REQUEST_RAW = BIT(1),	/* raw hex key/value dump */
	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2), /* include the message id */
};

/* Runtime-tunable log mask, exposed as module parameter "debug_mask". */
static int msm_rpm_debug_mask;
module_param_named(
	debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
);
54
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -060055struct msm_rpm_driver_data {
56 const char *ch_name;
57 uint32_t ch_type;
58 smd_channel_t *ch_info;
59 struct work_struct work;
60 spinlock_t smd_lock_write;
61 spinlock_t smd_lock_read;
62 struct completion smd_open;
63};
64
#define DEFAULT_BUFFER_SIZE 256		/* initial request scratch buffer */
#define DEBUG_PRINT_BUFFER_SIZE 512	/* log formatting buffer */
#define MAX_SLEEP_BUFFER 128		/* max bytes for a cached sleep request */

/* Pick an allocation flag that is safe for the caller's context. */
#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
#define INV_RSC "resource does not exist"	/* NACK payload from RPM */
#define ERR "err\0"				/* NACK key from RPM */
#define MAX_ERR_BUFFER_SIZE 128
#define INIT_ERROR 1

static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
/* True when running without a real RPM (requests are not sent). */
static bool standalone;
77
/* Register @nb to be called for every sleep-set KVP sent to the RPM. */
int msm_rpm_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
}
82
/* Remove @nb from the sleep-set notification chain. */
int msm_rpm_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
}
87
static struct workqueue_struct *msm_rpm_smd_wq;

enum {
	MSM_RPM_MSG_REQUEST_TYPE = 0,
	MSM_RPM_MSG_TYPE_NR,
};

/* Service type word placed at the head of every request packet. */
static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
	0x716572, /* 'req\0' */
};

/*the order of fields matter and reflect the order expected by the RPM*/
struct rpm_request_header {
	uint32_t service_type;
	uint32_t request_len;	/* bytes following this header */
};

struct rpm_message_header {
	uint32_t msg_id;
	enum msm_rpm_set set;	/* active vs sleep set */
	uint32_t resource_type;
	uint32_t resource_id;
	uint32_t data_len;	/* bytes of KVP payload */
};

/* On-the-wire key/value header; k->s bytes of data follow immediately. */
struct kvp {
	unsigned int k;	/* key */
	unsigned int s;	/* size of the value in bytes */
};

struct msm_rpm_kvp_data {
	uint32_t key;
	uint32_t nbytes; /* number of bytes */
	uint8_t *value;
	bool valid;	/* set when the entry holds unsent data */
};

/* Cached sleep-set request, keyed by (resource_type, resource_id). */
struct slp_buf {
	struct rb_node node;
	char ubuf[MAX_SLEEP_BUFFER];	/* backing storage */
	char *buf;			/* u32-aligned pointer into ubuf */
	bool valid;			/* true if not yet flushed to RPM */
};
static struct rb_root tr_root = RB_ROOT;

static int msm_rpm_send_smd_buffer(char *buf, int size, bool noirq);
static uint32_t msm_rpm_get_next_msg_id(void);
135
136static inline unsigned int get_rsc_type(char *buf)
137{
138 struct rpm_message_header *h;
139 h = (struct rpm_message_header *)
140 (buf + sizeof(struct rpm_request_header));
141 return h->resource_type;
142}
143
144static inline unsigned int get_rsc_id(char *buf)
145{
146 struct rpm_message_header *h;
147 h = (struct rpm_message_header *)
148 (buf + sizeof(struct rpm_request_header));
149 return h->resource_id;
150}
151
/* Lvalue accessors into a raw request buffer; callers assign through
 * them (see msm_rpm_flush_requests() and delete_kvp()/add_kvp()).
 */
#define get_data_len(buf) \
	(((struct rpm_message_header *) \
	  (buf + sizeof(struct rpm_request_header)))->data_len)

#define get_req_len(buf) \
	(((struct rpm_request_header *)(buf))->request_len)

#define get_msg_id(buf) \
	(((struct rpm_message_header *) \
	  (buf + sizeof(struct rpm_request_header)))->msg_id)
163
/* Total size of the raw buffer: request header plus its payload. */
static inline int get_buf_len(char *buf)
{
	return get_req_len(buf) + sizeof(struct rpm_request_header);
}

/* First KVP, located immediately after the two headers. */
static inline struct kvp *get_first_kvp(char *buf)
{
	return (struct kvp *)(buf + sizeof(struct rpm_request_header)
			+ sizeof(struct rpm_message_header));
}

/* KVPs are packed back to back: header, then k->s bytes of value. */
static inline struct kvp *get_next_kvp(struct kvp *k)
{
	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
}

/* Value bytes of a KVP follow its header directly. */
static inline void *get_data(struct kvp *k)
{
	return (void *)k + sizeof(*k);
}
184
185
186static void delete_kvp(char *msg, struct kvp *d)
187{
188 struct kvp *n;
189 int dec, size;
190
191 n = get_next_kvp(d);
192 dec = (void *)n - (void *)d;
193 size = get_data_len(msg) - ((void *)n - (void *)get_first_kvp(msg));
194
195 memcpy((void *)d, (void *)n, size);
196
197 get_data_len(msg) -= dec;
198 get_req_len(msg) -= dec;
199}
200
/* Overwrite @dest's value with @src's; both must have the same size. */
static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
{
	memcpy(get_data(dest), get_data(src), src->s);
}

/* Append KVP @n (header + value) to the end of request buffer @buf. */
static void add_kvp(char *buf, struct kvp *n)
{
	int inc = sizeof(*n) + n->s;
	/* Cached sleep buffers are fixed-size; overflow is a driver bug. */
	BUG_ON((get_req_len(buf) + inc) > MAX_SLEEP_BUFFER);

	memcpy(buf + get_buf_len(buf), n, inc);

	get_data_len(buf) += inc;
	get_req_len(buf) += inc;
}
216
217static struct slp_buf *tr_search(struct rb_root *root, char *slp)
218{
219 unsigned int type = get_rsc_type(slp);
220 unsigned int id = get_rsc_id(slp);
221
222 struct rb_node *node = root->rb_node;
223
224 while (node) {
225 struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
226 unsigned int ctype = get_rsc_type(cur->buf);
227 unsigned int cid = get_rsc_id(cur->buf);
228
229 if (type < ctype)
230 node = node->rb_left;
231 else if (type > ctype)
232 node = node->rb_right;
233 else if (id < cid)
234 node = node->rb_left;
235 else if (id > cid)
236 node = node->rb_right;
237 else
238 return cur;
239 }
240 return NULL;
241}
242
/*
 * Insert @slp into the cache, keyed by (resource type, resource id).
 * Returns -EINVAL if an entry with the same key already exists.
 * Marks the new entry valid (i.e. pending flush to the RPM).
 */
static int tr_insert(struct rb_root *root, struct slp_buf *slp)
{
	unsigned int type = get_rsc_type(slp->buf);
	unsigned int id = get_rsc_id(slp->buf);

	struct rb_node **node = &(root->rb_node), *parent = NULL;

	while (*node) {
		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(curr->buf);
		unsigned int cid = get_rsc_id(curr->buf);

		parent = *node;

		if (type < ctype)
			node = &((*node)->rb_left);
		else if (type > ctype)
			node = &((*node)->rb_right);
		else if (id < cid)
			node = &((*node)->rb_left);
		else if (id > cid)
			node = &((*node)->rb_right);
		else
			return -EINVAL;
	}

	rb_link_node(&slp->node, parent, node);
	rb_insert_color(&slp->node, root);
	slp->valid = true;
	return 0;
}
274
/* Iterate over every packed KVP in a raw request buffer @buf. */
#define for_each_kvp(buf, k) \
	for (k = (struct kvp *)get_first_kvp(buf); \
	     ((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf);\
	     k = get_next_kvp(k))
280
/*
 * Merge the new request @buf into the cached sleep request @s.
 * For each key present in both: if the sizes match and the data
 * changed, overwrite in place; if the sizes differ, delete the old
 * KVP and append the new one. Any change marks @s valid so it is
 * re-sent on the next flush. Keys only present in @buf are ignored.
 */
static void tr_update(struct slp_buf *s, char *buf)
{
	struct kvp *e, *n;

	for_each_kvp(buf, n) {
		for_each_kvp(s->buf, e) {
			if (n->k == e->k) {
				if (n->s == e->s) {
					void *e_data = get_data(e);
					void *n_data = get_data(n);
					if (memcmp(e_data, n_data, n->s)) {
						update_kvp_data(e, n);
						s->valid = true;
					}
				} else {
					/* Size changed: replace the entry. */
					delete_kvp(s->buf, e);
					add_kvp(s->buf, n);
					s->valid = true;
				}
				break;
			}
		}
	}
}
305
/*
 * Cache a sleep-set request instead of sending it immediately; it is
 * transmitted later by msm_rpm_flush_requests(). Returns 0 on success,
 * -ENOMEM if the request is too large or allocation fails.
 *
 * NOTE(review): the @flag parameter is unused — allocation is always
 * GFP_ATOMIC because the spinlock is held; confirm before relying on it.
 */
int msm_rpm_smd_buffer_request(char *buf, int size, gfp_t flag)
{
	struct slp_buf *slp;
	static DEFINE_SPINLOCK(slp_buffer_lock);
	unsigned long flags;

	if (size > MAX_SLEEP_BUFFER)
		return -ENOMEM;

	spin_lock_irqsave(&slp_buffer_lock, flags);
	slp = tr_search(&tr_root, buf);

	if (!slp) {
		/* First request for this resource: cache a copy. */
		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
		if (!slp) {
			spin_unlock_irqrestore(&slp_buffer_lock, flags);
			return -ENOMEM;
		}
		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
		memcpy(slp->buf, buf, size);
		if (tr_insert(&tr_root, slp))
			pr_err("%s(): Error updating sleep request\n",
					__func__);
	} else {
		/* handle unsent requests */
		tr_update(slp, buf);
	}

	spin_unlock_irqrestore(&slp_buffer_lock, flags);

	return 0;
}
338static void msm_rpm_print_sleep_buffer(struct slp_buf *s)
339{
340 char buf[DEBUG_PRINT_BUFFER_SIZE] = {0};
341 int pos;
342 int buflen = DEBUG_PRINT_BUFFER_SIZE;
343 char ch[5] = {0};
344 u32 type;
345 struct kvp *e;
346
347 if (!s)
348 return;
349
350 if (!s->valid)
351 return;
352
353 type = get_rsc_type(s->buf);
354 memcpy(ch, &type, sizeof(u32));
355
356 pos = scnprintf(buf, buflen,
357 "Sleep request type = 0x%08x(%s)",
358 get_rsc_type(s->buf), ch);
359 pos += scnprintf(buf + pos, buflen - pos, " id = 0%x",
360 get_rsc_id(s->buf));
361 for_each_kvp(s->buf, e) {
362 int i;
363 char *data = get_data(e);
364
365 memcpy(ch, &e->k, sizeof(u32));
366
367 pos += scnprintf(buf + pos, buflen - pos,
368 "\n\t\tkey = 0x%08x(%s)",
369 e->k, ch);
370 pos += scnprintf(buf + pos, buflen - pos,
371 " sz= %d data =", e->s);
372
373 for (i = 0; i < e->s; i++)
374 pos += scnprintf(buf + pos, buflen - pos,
375 " 0x%02X", data[i]);
376 }
377 pos += scnprintf(buf + pos, buflen - pos, "\n");
378 printk(buf);
379}
380
/*
 * Send every pending cached sleep request to the RPM. Called on the
 * way into sleep with interrupts disabled (hence noirq sends). If
 * @print is set, each request is logged first. Always returns 0.
 */
static int msm_rpm_flush_requests(bool print)
{
	struct rb_node *t;
	int ret;

	for (t = rb_first(&tr_root); t; t = rb_next(t)) {

		struct slp_buf *s = rb_entry(t, struct slp_buf, node);

		if (!s->valid)
			continue;

		if (print)
			msm_rpm_print_sleep_buffer(s);

		get_msg_id(s->buf) = msm_rpm_get_next_msg_id();
		ret = msm_rpm_send_smd_buffer(s->buf,
				get_buf_len(s->buf), true);
		/* By not adding the message to a wait list we can reduce
		 * latency involved in waiting for a ACK from RPM. The ACK
		 * messages will be processed when we wakeup from sleep but
		 * processing should be minimal
		 * msm_rpm_wait_for_ack_noirq(get_msg_id(s->buf));
		 */

		WARN_ON(ret != get_buf_len(s->buf));

		trace_rpm_send_message(true, MSM_RPM_CTX_SLEEP_SET,
				get_rsc_type(s->buf),
				get_rsc_id(s->buf),
				get_msg_id(s->buf));

		/* Don't resend until the entry is modified again. */
		s->valid = false;
	}
	return 0;

}
418
419
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600420static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
421
422static struct msm_rpm_driver_data msm_rpm_data;
423
424struct msm_rpm_request {
425 struct rpm_request_header req_hdr;
426 struct rpm_message_header msg_hdr;
427 struct msm_rpm_kvp_data *kvp;
428 uint32_t num_elements;
429 uint32_t write_idx;
430 uint8_t *buf;
431 uint32_t numbytes;
432};
433
434/*
435 * Data related to message acknowledgement
436 */
437
438LIST_HEAD(msm_rpm_wait_list);
439
440struct msm_rpm_wait_data {
441 struct list_head list;
442 uint32_t msg_id;
443 bool ack_recd;
444 int errno;
445 struct completion ack;
446};
447DEFINE_SPINLOCK(msm_rpm_list_lock);
448
449struct msm_rpm_ack_msg {
450 uint32_t req;
451 uint32_t req_len;
452 uint32_t rsc_id;
453 uint32_t msg_len;
454 uint32_t id_ack;
455};
456
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600457LIST_HEAD(msm_rpm_ack_list);
458
Mahesh Sivasubramanian06ff5d02012-11-09 15:47:12 -0700459static DECLARE_COMPLETION(data_ready);
460
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600461static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
462 struct msm_rpm_kvp_data *kvp)
463{
464 struct msm_rpm_notifier_data notif;
465
466 notif.rsc_type = hdr->resource_type;
467 notif.rsc_id = hdr->resource_id;
468 notif.key = kvp->key;
469 notif.size = kvp->nbytes;
470 notif.value = kvp->value;
471 atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
472}
473
/*
 * Add (or update) one key/value pair on a request handle.
 * Existing keys are matched first; an unchanged value is a no-op, a
 * size change frees and reallocates the stored value, and a new key
 * takes the next free slot. Returns 0 on success, -EINVAL for a NULL
 * handle, -ENOMEM when out of slots or memory.
 */
static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size, bool noirq)
{
	int i;
	int data_size, msg_size;

	if (!handle) {
		pr_err("%s(): Invalid handle\n", __func__);
		return -EINVAL;
	}

	/* Values are padded to a 4-byte boundary on the wire. */
	data_size = ALIGN(size, SZ_4);
	msg_size = data_size + sizeof(struct rpm_request_header);

	for (i = 0; i < handle->write_idx; i++) {
		if (handle->kvp[i].key != key)
			continue;
		if (handle->kvp[i].nbytes != data_size) {
			/* Size changed: drop the old value, realloc below. */
			kfree(handle->kvp[i].value);
			handle->kvp[i].value = NULL;
		} else {
			/* Identical data already queued: nothing to do. */
			if (!memcmp(handle->kvp[i].value, data, data_size))
				return 0;
		}
		break;
	}

	if (i >= handle->num_elements) {
		pr_err("%s(): Number of resources exceeds max allocated\n",
				__func__);
		return -ENOMEM;
	}

	/* Key not found above: claim the next free slot. */
	if (i == handle->write_idx)
		handle->write_idx++;

	if (!handle->kvp[i].value) {
		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));

		if (!handle->kvp[i].value) {
			pr_err("%s(): Failed malloc\n", __func__);
			return -ENOMEM;
		}
	} else {
		/* We enter the else case, if a key already exists but the
		 * data doesn't match. In which case, we should zero the data
		 * out.
		 */
		memset(handle->kvp[i].value, 0, data_size);
	}

	/* Keep the serialized payload length in sync with this change. */
	if (!handle->kvp[i].valid)
		handle->msg_hdr.data_len += msg_size;
	else
		handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);

	handle->kvp[i].nbytes = data_size;
	handle->kvp[i].key = key;
	memcpy(handle->kvp[i].value, data, size);
	handle->kvp[i].valid = true;

	return 0;

}
538
539static struct msm_rpm_request *msm_rpm_create_request_common(
540 enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
541 int num_elements, bool noirq)
542{
543 struct msm_rpm_request *cdata;
544
545 cdata = kzalloc(sizeof(struct msm_rpm_request),
546 GFP_FLAG(noirq));
547
548 if (!cdata) {
549 printk(KERN_INFO"%s():Cannot allocate memory for client data\n",
550 __func__);
551 goto cdata_alloc_fail;
552 }
553
554 cdata->msg_hdr.set = set;
555 cdata->msg_hdr.resource_type = rsc_type;
556 cdata->msg_hdr.resource_id = rsc_id;
557 cdata->msg_hdr.data_len = 0;
558
559 cdata->num_elements = num_elements;
560 cdata->write_idx = 0;
561
562 cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
563 GFP_FLAG(noirq));
564
565 if (!cdata->kvp) {
566 pr_warn("%s(): Cannot allocate memory for key value data\n",
567 __func__);
568 goto kvp_alloc_fail;
569 }
570
571 cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
572
573 if (!cdata->buf)
574 goto buf_alloc_fail;
575
576 cdata->numbytes = DEFAULT_BUFFER_SIZE;
577 return cdata;
578
579buf_alloc_fail:
580 kfree(cdata->kvp);
581kvp_alloc_fail:
582 kfree(cdata);
583cdata_alloc_fail:
584 return NULL;
585
586}
587
588void msm_rpm_free_request(struct msm_rpm_request *handle)
589{
590 int i;
591
592 if (!handle)
593 return;
Michael Bohan667aaf82012-09-20 14:32:55 -0700594 for (i = 0; i < handle->num_elements; i++)
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600595 kfree(handle->kvp[i].value);
596 kfree(handle->kvp);
Michael Bohan667aaf82012-09-20 14:32:55 -0700597 kfree(handle->buf);
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600598 kfree(handle);
599}
600EXPORT_SYMBOL(msm_rpm_free_request);
601
/* Allocate a request handle; may sleep (GFP_KERNEL allocations). */
struct msm_rpm_request *msm_rpm_create_request(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, false);
}
EXPORT_SYMBOL(msm_rpm_create_request);

/* As above, but safe in atomic context (GFP_ATOMIC allocations). */
struct msm_rpm_request *msm_rpm_create_request_noirq(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, true);
}
EXPORT_SYMBOL(msm_rpm_create_request_noirq);
619
/* Add a key/value pair to a handle; may sleep. */
int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);

}
EXPORT_SYMBOL(msm_rpm_add_kvp_data);

/* As above, but safe in atomic context. */
int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
634
/* Runs in interrupt context */
/*
 * SMD event callback: wakes the reader on incoming data and signals
 * channel-open completion during probe. Other events are ignored.
 */
static void msm_rpm_notify(void *data, unsigned event)
{
	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
	BUG_ON(!pdata);

	/* Events can arrive before the channel handle is recorded. */
	if (!(pdata->ch_info))
		return;

	switch (event) {
	case SMD_EVENT_DATA:
		complete(&data_ready);
		break;
	case SMD_EVENT_OPEN:
		complete(&pdata->smd_open);
		break;
	case SMD_EVENT_CLOSE:
	case SMD_EVENT_STATUS:
	case SMD_EVENT_REOPEN_READY:
		break;
	default:
		pr_info("Unknown SMD event\n");

	}
}
660
Mahesh Sivasubramanian9063a292012-11-09 09:15:30 -0700661bool msm_rpm_waiting_for_ack(void)
662{
663 bool ret;
664 unsigned long flags;
665
666 spin_lock_irqsave(&msm_rpm_list_lock, flags);
667 ret = list_empty(&msm_rpm_wait_list);
668 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
669
670 return !ret;
671}
672
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600673static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
674{
675 struct list_head *ptr;
676 struct msm_rpm_wait_data *elem;
677 unsigned long flags;
678
679 spin_lock_irqsave(&msm_rpm_list_lock, flags);
680
681 list_for_each(ptr, &msm_rpm_wait_list) {
682 elem = list_entry(ptr, struct msm_rpm_wait_data, list);
683 if (elem && (elem->msg_id == msg_id))
684 break;
685 elem = NULL;
686 }
687 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
688 return elem;
689}
690
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -0600691static uint32_t msm_rpm_get_next_msg_id(void)
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600692{
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -0600693 uint32_t id;
694
695 /*
696 * A message id of 0 is used by the driver to indicate a error
697 * condition. The RPM driver uses a id of 1 to indicate unsent data
698 * when the data sent over hasn't been modified. This isn't a error
699 * scenario and wait for ack returns a success when the message id is 1.
700 */
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600701
702 do {
703 id = atomic_inc_return(&msm_rpm_msg_id);
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -0600704 } while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600705
706 return id;
707}
708
/*
 * Queue a wait-list entry for @msg_id before sending, so the reader
 * can match the eventual ACK. Returns -ENOMEM on allocation failure.
 */
static int msm_rpm_add_wait_list(uint32_t msg_id)
{
	unsigned long flags;
	struct msm_rpm_wait_data *data =
		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);

	if (!data)
		return -ENOMEM;

	init_completion(&data->ack);
	data->ack_recd = false;
	data->msg_id = msg_id;
	/* Sentinel meaning "no ACK processed yet". */
	data->errno = INIT_ERROR;
	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_add(&data->list, &msm_rpm_wait_list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);

	return 0;
}
728
/* Unlink a wait-list entry under the list lock and free it. */
static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
{
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_del(&elem->list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	kfree(elem);
}
738
/*
 * Deliver an ACK for @msg_id: record @errno on the matching waiter
 * and complete it. An ACK with no waiter (sleep-set sends don't wait)
 * is only traced.
 */
static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
	struct list_head *ptr;
	struct msm_rpm_wait_data *elem;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);

	list_for_each(ptr, &msm_rpm_wait_list) {
		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
		if (elem && (elem->msg_id == msg_id)) {
			elem->errno = errno;
			elem->ack_recd = true;
			complete(&elem->ack);
			break;
		}
		elem = NULL;
	}
	/* Special case where the sleep driver doesn't
	 * wait for ACKs. This would decrease the latency involved with
	 * entering RPM assisted power collapse.
	 */
	if (!elem)
		trace_rpm_ack_recd(0, msg_id);

	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
}
766
/* On-the-wire KVP layout inside an ACK packet. */
struct msm_rpm_kvp_packet {
	uint32_t id;
	uint32_t len;
	uint32_t val;
};

/* Message id being acknowledged, taken from the ACK header. */
static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
{
	return ((struct msm_rpm_ack_msg *)buf)->id_ack;
}
777
/*
 * Decode the error payload of an ACK. Returns 0 for a plain ACK,
 * -EINVAL when the RPM NACKed with "resource does not exist", and
 * -ENODEV for any other NACK. BUGs if a NACK lacks the "err" key.
 */
static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
{
	uint8_t *tmp;
	uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;

	int rc = -ENODEV;

	/* Compute the number of error-payload bytes past the header. */
	req_len -= sizeof(struct msm_rpm_ack_msg);
	req_len += 2 * sizeof(uint32_t);
	if (!req_len)
		return 0;

	tmp = buf + sizeof(struct msm_rpm_ack_msg);

	/* A NACK must start with the "err" key. */
	BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));

	/* Skip the error key and its length word. */
	tmp += 2 * sizeof(uint32_t);

	if (!(memcmp(tmp, INV_RSC, min(req_len, sizeof(INV_RSC))-1))) {
		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
		rc = -EINVAL;
	} else {
		pr_err("%s(): RPM NACK Invalid header\n", __func__);
	}

	return rc;
}
805
Praveen Chidambaram4647cdb2012-08-13 17:55:44 -0600806static int msm_rpm_read_smd_data(char *buf)
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600807{
808 int pkt_sz;
809 int bytes_read = 0;
810
811 pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
812
Mahesh Sivasubramanian137cd702012-08-23 18:52:59 -0600813 if (!pkt_sz)
814 return -EAGAIN;
815
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600816 BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
817
818 if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
Praveen Chidambaram4647cdb2012-08-13 17:55:44 -0600819 return -EAGAIN;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600820
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600821 do {
822 int len;
823
824 len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
825 pkt_sz -= len;
826 bytes_read += len;
827
828 } while (pkt_sz > 0);
829
830 BUG_ON(pkt_sz < 0);
Praveen Chidambaram4647cdb2012-08-13 17:55:44 -0600831
832 return 0;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -0600833}
834
/*
 * Reader work loop: blocks until the SMD notify callback signals data,
 * then drains every available ACK packet and dispatches each to its
 * waiter. Runs forever on the driver workqueue.
 */
static void msm_rpm_smd_work(struct work_struct *work)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};

	while (1) {
		wait_for_completion(&data_ready);

		spin_lock(&msm_rpm_data.smd_lock_read);
		while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
			if (msm_rpm_read_smd_data(buf))
				break;
			msg_id = msm_rpm_get_msg_id_from_ack(buf);
			errno = msm_rpm_get_error_from_ack(buf);
			msm_rpm_process_ack(msg_id, errno);
		}
		spin_unlock(&msm_rpm_data.smd_lock_read);
	}
}
855
David Collinsc26c6522012-07-03 16:04:37 -0700856static void msm_rpm_log_request(struct msm_rpm_request *cdata)
857{
858 char buf[DEBUG_PRINT_BUFFER_SIZE];
859 size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
860 char name[5];
861 u32 value;
862 int i, j, prev_valid;
863 int valid_count = 0;
864 int pos = 0;
865
866 name[4] = 0;
867
868 for (i = 0; i < cdata->write_idx; i++)
869 if (cdata->kvp[i].valid)
870 valid_count++;
871
872 pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
873 if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
874 pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
875 cdata->msg_hdr.msg_id);
876 pos += scnprintf(buf + pos, buflen - pos, "s=%s",
877 (cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
878
879 if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
880 && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
881 /* Both pretty and raw formatting */
882 memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
883 pos += scnprintf(buf + pos, buflen - pos,
884 ", rsc_type=0x%08X (%s), rsc_id=%u; ",
885 cdata->msg_hdr.resource_type, name,
886 cdata->msg_hdr.resource_id);
887
888 for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
889 if (!cdata->kvp[i].valid)
890 continue;
891
892 memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
893 pos += scnprintf(buf + pos, buflen - pos,
894 "[key=0x%08X (%s), value=%s",
895 cdata->kvp[i].key, name,
896 (cdata->kvp[i].nbytes ? "0x" : "null"));
897
898 for (j = 0; j < cdata->kvp[i].nbytes; j++)
899 pos += scnprintf(buf + pos, buflen - pos,
900 "%02X ",
901 cdata->kvp[i].value[j]);
902
903 if (cdata->kvp[i].nbytes)
904 pos += scnprintf(buf + pos, buflen - pos, "(");
905
906 for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
907 value = 0;
908 memcpy(&value, &cdata->kvp[i].value[j],
909 min(sizeof(uint32_t),
910 cdata->kvp[i].nbytes - j));
911 pos += scnprintf(buf + pos, buflen - pos, "%u",
912 value);
913 if (j + 4 < cdata->kvp[i].nbytes)
914 pos += scnprintf(buf + pos,
915 buflen - pos, " ");
916 }
917 if (cdata->kvp[i].nbytes)
918 pos += scnprintf(buf + pos, buflen - pos, ")");
919 pos += scnprintf(buf + pos, buflen - pos, "]");
920 if (prev_valid + 1 < valid_count)
921 pos += scnprintf(buf + pos, buflen - pos, ", ");
922 prev_valid++;
923 }
924 } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
925 /* Pretty formatting only */
926 memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
927 pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
928 cdata->msg_hdr.resource_id);
929
930 for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
931 if (!cdata->kvp[i].valid)
932 continue;
933
934 memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
935 pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
936 name, (cdata->kvp[i].nbytes ? "" : "null"));
937
938 for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
939 value = 0;
940 memcpy(&value, &cdata->kvp[i].value[j],
941 min(sizeof(uint32_t),
942 cdata->kvp[i].nbytes - j));
943 pos += scnprintf(buf + pos, buflen - pos, "%u",
944 value);
945
946 if (j + 4 < cdata->kvp[i].nbytes)
947 pos += scnprintf(buf + pos,
948 buflen - pos, " ");
949 }
950 if (prev_valid + 1 < valid_count)
951 pos += scnprintf(buf + pos, buflen - pos, ", ");
952 prev_valid++;
953 }
954 } else {
955 /* Raw formatting only */
956 pos += scnprintf(buf + pos, buflen - pos,
957 ", rsc_type=0x%08X, rsc_id=%u; ",
958 cdata->msg_hdr.resource_type,
959 cdata->msg_hdr.resource_id);
960
961 for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
962 if (!cdata->kvp[i].valid)
963 continue;
964
965 pos += scnprintf(buf + pos, buflen - pos,
966 "[key=0x%08X, value=%s",
967 cdata->kvp[i].key,
968 (cdata->kvp[i].nbytes ? "0x" : "null"));
969 for (j = 0; j < cdata->kvp[i].nbytes; j++) {
970 pos += scnprintf(buf + pos, buflen - pos,
971 "%02X",
972 cdata->kvp[i].value[j]);
973 if (j + 1 < cdata->kvp[i].nbytes)
974 pos += scnprintf(buf + pos,
975 buflen - pos, " ");
976 }
977 pos += scnprintf(buf + pos, buflen - pos, "]");
978 if (prev_valid + 1 < valid_count)
979 pos += scnprintf(buf + pos, buflen - pos, ", ");
980 prev_valid++;
981 }
982 }
983
984 pos += scnprintf(buf + pos, buflen - pos, "\n");
985 printk(buf);
986}
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -0600987static int msm_rpm_send_smd_buffer(char *buf, int size, bool noirq)
988{
989 unsigned long flags;
990 int ret;
David Collinsc26c6522012-07-03 16:04:37 -0700991
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -0600992 spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
993 ret = smd_write_avail(msm_rpm_data.ch_info);
994
995 while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
996 if (ret < 0)
997 break;
998 if (!noirq) {
999 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
1000 flags);
1001 cpu_relax();
1002 spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
1003 } else
1004 udelay(5);
1005 }
1006
1007 if (ret < 0) {
1008 pr_err("%s(): SMD not initialized\n", __func__);
1009 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
1010 return ret;
1011 }
1012
1013 ret = smd_write(msm_rpm_data.ch_info, buf, size);
1014 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
1015 return ret;
1016
1017}
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001018static int msm_rpm_send_data(struct msm_rpm_request *cdata,
1019 int msg_type, bool noirq)
1020{
1021 uint8_t *tmpbuff;
1022 int i, ret, msg_size;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001023
1024 int req_hdr_sz, msg_hdr_sz;
1025
1026 if (!cdata->msg_hdr.data_len)
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -06001027 return 1;
1028
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001029 req_hdr_sz = sizeof(cdata->req_hdr);
1030 msg_hdr_sz = sizeof(cdata->msg_hdr);
1031
1032 cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
1033
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001034 cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
1035 msg_size = cdata->req_hdr.request_len + req_hdr_sz;
1036
1037 /* populate data_len */
1038 if (msg_size > cdata->numbytes) {
1039 kfree(cdata->buf);
1040 cdata->numbytes = msg_size;
1041 cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
1042 }
1043
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -06001044 if (!cdata->buf) {
1045 pr_err("%s(): Failed malloc\n", __func__);
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001046 return 0;
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -06001047 }
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001048
1049 tmpbuff = cdata->buf;
1050
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001051 tmpbuff += req_hdr_sz + msg_hdr_sz;
1052
1053 for (i = 0; (i < cdata->write_idx); i++) {
1054 /* Sanity check */
1055 BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
1056
1057 if (!cdata->kvp[i].valid)
1058 continue;
1059
1060 memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
1061 tmpbuff += sizeof(uint32_t);
1062
1063 memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
1064 tmpbuff += sizeof(uint32_t);
1065
1066 memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
1067 tmpbuff += cdata->kvp[i].nbytes;
Mahesh Sivasubramaniane733ddc2012-11-05 16:38:56 -07001068
1069 if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
1070 msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
1071 &cdata->kvp[i]);
1072
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001073 }
1074
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -06001075 memcpy(cdata->buf, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
1076
1077 if ((cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) &&
1078 !msm_rpm_smd_buffer_request(cdata->buf, msg_size,
1079 GFP_FLAG(noirq)))
1080 return 1;
1081
1082 cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
1083
1084 memcpy(cdata->buf + req_hdr_sz, &cdata->msg_hdr, msg_hdr_sz);
1085
David Collinsc26c6522012-07-03 16:04:37 -07001086 if (msm_rpm_debug_mask
1087 & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
1088 msm_rpm_log_request(cdata);
1089
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001090 if (standalone) {
1091 for (i = 0; (i < cdata->write_idx); i++)
1092 cdata->kvp[i].valid = false;
1093
1094 cdata->msg_hdr.data_len = 0;
1095 ret = cdata->msg_hdr.msg_id;
1096 return ret;
1097 }
1098
1099 msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
1100
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -06001101 ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size, noirq);
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001102
1103 if (ret == msg_size) {
Mahesh Sivasubramaniand69058b2012-10-01 16:49:26 -06001104 trace_rpm_send_message(noirq, cdata->msg_hdr.set,
1105 cdata->msg_hdr.resource_type,
1106 cdata->msg_hdr.resource_id,
1107 cdata->msg_hdr.msg_id);
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001108 for (i = 0; (i < cdata->write_idx); i++)
1109 cdata->kvp[i].valid = false;
1110 cdata->msg_hdr.data_len = 0;
1111 ret = cdata->msg_hdr.msg_id;
1112 } else if (ret < msg_size) {
1113 struct msm_rpm_wait_data *rc;
1114 ret = 0;
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -06001115 pr_err("Failed to write data msg_size:%d ret:%d\n",
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001116 msg_size, ret);
1117 rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
1118 if (rc)
1119 msm_rpm_free_list_entry(rc);
1120 }
1121 return ret;
1122}
1123
1124int msm_rpm_send_request(struct msm_rpm_request *handle)
1125{
Mahesh Sivasubramanian4f534cb2012-09-28 14:17:05 -06001126 int ret;
1127 static DEFINE_MUTEX(send_mtx);
1128
1129 mutex_lock(&send_mtx);
1130 ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
1131 mutex_unlock(&send_mtx);
1132
1133 return ret;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001134}
1135EXPORT_SYMBOL(msm_rpm_send_request);
1136
1137int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
1138{
1139 return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
1140}
1141EXPORT_SYMBOL(msm_rpm_send_request_noirq);
1142
1143int msm_rpm_wait_for_ack(uint32_t msg_id)
1144{
1145 struct msm_rpm_wait_data *elem;
Mahesh Sivasubramanian55840772012-12-20 10:19:56 -07001146 int rc = 0;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001147
Mahesh Sivasubramanian7ef2aad2012-07-30 13:52:31 -06001148 if (!msg_id) {
1149 pr_err("%s(): Invalid msg id\n", __func__);
1150 return -ENOMEM;
1151 }
1152
1153 if (msg_id == 1)
Mahesh Sivasubramanian55840772012-12-20 10:19:56 -07001154 return rc;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001155
1156 if (standalone)
Mahesh Sivasubramanian55840772012-12-20 10:19:56 -07001157 return rc;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001158
1159 elem = msm_rpm_get_entry_from_msg_id(msg_id);
1160 if (!elem)
Mahesh Sivasubramanian55840772012-12-20 10:19:56 -07001161 return rc;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001162
Michael Bohan5eb00812012-08-20 11:56:33 -07001163 wait_for_completion(&elem->ack);
Mahesh Sivasubramaniand69058b2012-10-01 16:49:26 -06001164 trace_rpm_ack_recd(0, msg_id);
1165
Mahesh Sivasubramanian55840772012-12-20 10:19:56 -07001166 rc = elem->errno;
Michael Bohan5eb00812012-08-20 11:56:33 -07001167 msm_rpm_free_list_entry(elem);
Mahesh Sivasubramanian55840772012-12-20 10:19:56 -07001168
1169 return rc;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001170}
1171EXPORT_SYMBOL(msm_rpm_wait_for_ack);
1172
/*
 * msm_rpm_wait_for_ack_noirq() - poll for an RPM ack in atomic context.
 * @msg_id: id returned by msm_rpm_send_request_noirq()
 *
 * Instead of sleeping on a completion, this busy-polls the SMD channel
 * under smd_lock_read, draining and processing every incoming ack until
 * the one for @msg_id arrives.
 *
 * Return: the error code from the ack, 0 for msg id 1 / standalone mode /
 * an already-reaped id, or -ENOMEM for a zero msg id.
 */
int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	unsigned long flags;
	int rc = 0;
	uint32_t id = 0;

	if (!msg_id) {
		pr_err("%s(): Invalid msg id\n", __func__);
		return -ENOMEM;
	}

	/* msg id 1 marks an empty/buffered request: nothing to wait for */
	if (msg_id == 1)
		return 0;

	/* standalone mode never sent anything, so no ack will come */
	if (standalone)
		return 0;

	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);

	elem = msm_rpm_get_entry_from_msg_id(msg_id);

	if (!elem)
		/* Should this be a bug
		 * Is it ok for another thread to read the msg?
		 */
		goto wait_ack_cleanup;

	/* ack already processed (by the worker or another waiter) */
	if (elem->errno != INIT_ERROR) {
		rc = elem->errno;
		msm_rpm_free_list_entry(elem);
		goto wait_ack_cleanup;
	}

	/* busy-poll SMD, processing each ack, until ours shows up */
	while (id != msg_id) {
		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
			int errno;
			char buf[MAX_ERR_BUFFER_SIZE] = {};

			msm_rpm_read_smd_data(buf);
			id = msm_rpm_get_msg_id_from_ack(buf);
			errno = msm_rpm_get_error_from_ack(buf);
			msm_rpm_process_ack(id, errno);
		}
	}

	/* msm_rpm_process_ack() filled in elem->errno for our id */
	rc = elem->errno;
	trace_rpm_ack_recd(1, msg_id);

	msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);

	/* more packets arrived while we held the lock: kick the worker */
	if (smd_is_pkt_avail(msm_rpm_data.ch_info))
		complete(&data_ready);
	return rc;
}
1230EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
1231
1232int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
1233 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
1234{
1235 int i, rc;
1236 struct msm_rpm_request *req =
1237 msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
1238 if (!req)
1239 return -ENOMEM;
1240
1241 for (i = 0; i < nelems; i++) {
1242 rc = msm_rpm_add_kvp_data(req, kvp[i].key,
1243 kvp[i].data, kvp[i].length);
1244 if (rc)
1245 goto bail;
1246 }
1247
1248 rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
1249bail:
1250 msm_rpm_free_request(req);
1251 return rc;
1252}
1253EXPORT_SYMBOL(msm_rpm_send_message);
1254
1255int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
1256 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
1257{
1258 int i, rc;
1259 struct msm_rpm_request *req =
1260 msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
1261 if (!req)
1262 return -ENOMEM;
1263
1264 for (i = 0; i < nelems; i++) {
1265 rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
1266 kvp[i].data, kvp[i].length);
1267 if (rc)
1268 goto bail;
1269 }
1270
1271 rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
1272bail:
1273 msm_rpm_free_request(req);
1274 return rc;
1275}
1276EXPORT_SYMBOL(msm_rpm_send_message_noirq);
Mahesh Sivasubramanian11dad772012-07-13 14:00:01 -06001277
1278/**
1279 * During power collapse, the rpm driver disables the SMD interrupts to make
1280 * sure that the interrupt doesn't wakes us from sleep.
1281 */
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -06001282int msm_rpm_enter_sleep(bool print)
Mahesh Sivasubramanian11dad772012-07-13 14:00:01 -06001283{
Praveen Chidambaram51bb7962013-01-09 13:42:33 -07001284 if (standalone)
1285 return 0;
1286
Mahesh Sivasubramanian0558d4b2012-10-12 18:05:28 -06001287 msm_rpm_flush_requests(print);
1288
Mahesh Sivasubramanian11dad772012-07-13 14:00:01 -06001289 return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true);
1290}
1291EXPORT_SYMBOL(msm_rpm_enter_sleep);
1292
1293/**
1294 * When the system resumes from power collapse, the SMD interrupt disabled by
1295 * enter function has to reenabled to continue processing SMD message.
1296 */
1297void msm_rpm_exit_sleep(void)
1298{
Praveen Chidambaram51bb7962013-01-09 13:42:33 -07001299 if (standalone)
1300 return;
1301
Mahesh Sivasubramanian11dad772012-07-13 14:00:01 -06001302 smd_mask_receive_interrupt(msm_rpm_data.ch_info, false);
1303}
1304EXPORT_SYMBOL(msm_rpm_exit_sleep);
1305
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001306static int __devinit msm_rpm_dev_probe(struct platform_device *pdev)
1307{
1308 char *key = NULL;
1309 int ret;
1310
1311 key = "rpm-channel-name";
1312 ret = of_property_read_string(pdev->dev.of_node, key,
1313 &msm_rpm_data.ch_name);
1314 if (ret)
1315 goto fail;
1316
1317 key = "rpm-channel-type";
1318 ret = of_property_read_u32(pdev->dev.of_node, key,
1319 &msm_rpm_data.ch_type);
1320 if (ret)
1321 goto fail;
1322
Praveen Chidambaram51bb7962013-01-09 13:42:33 -07001323 key = "rpm-standalone";
1324 standalone = of_property_read_bool(pdev->dev.of_node, key);
1325
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001326 init_completion(&msm_rpm_data.smd_open);
1327 spin_lock_init(&msm_rpm_data.smd_lock_write);
1328 spin_lock_init(&msm_rpm_data.smd_lock_read);
1329 INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
1330
1331 if (smd_named_open_on_edge(msm_rpm_data.ch_name, msm_rpm_data.ch_type,
1332 &msm_rpm_data.ch_info, &msm_rpm_data,
1333 msm_rpm_notify)) {
1334 pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name,
1335 msm_rpm_data.ch_type);
1336
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001337 BUG_ON(!standalone);
1338 complete(&msm_rpm_data.smd_open);
Praveen Chidambaram51bb7962013-01-09 13:42:33 -07001339 } else {
1340 /*
1341 * Override DT's suggestion to try standalone; since we have an
1342 * SMD channel.
1343 */
1344 standalone = false;
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001345 }
1346
Michael Bohan5eb00812012-08-20 11:56:33 -07001347 wait_for_completion(&msm_rpm_data.smd_open);
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001348
1349 smd_disable_read_intr(msm_rpm_data.ch_info);
1350
1351 if (!standalone) {
1352 msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
1353 if (!msm_rpm_smd_wq)
1354 return -EINVAL;
Mahesh Sivasubramanian06ff5d02012-11-09 15:47:12 -07001355 queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001356 }
1357
1358 of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
Praveen Chidambaram51bb7962013-01-09 13:42:33 -07001359
1360 if (standalone)
1361 pr_info("%s(): RPM running in standalone mode\n", __func__);
1362
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001363 return 0;
1364fail:
1365 pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
1366 pdev->dev.of_node->full_name, key);
1367 return -EINVAL;
1368}
1369
/* Device-tree match table: binds this driver to "qcom,rpm-smd" nodes. */
static struct of_device_id msm_rpm_match_table[] = {
	{.compatible = "qcom,rpm-smd"},
	{},
};
1374
/* Platform driver descriptor; probed via the DT match table above. */
static struct platform_driver msm_rpm_device_driver = {
	.probe = msm_rpm_dev_probe,
	.driver = {
		.name = "rpm-smd",
		.owner = THIS_MODULE,
		.of_match_table = msm_rpm_match_table,
	},
};
1383
1384int __init msm_rpm_driver_init(void)
1385{
1386 static bool registered;
1387
1388 if (registered)
1389 return 0;
1390 registered = true;
1391
1392 return platform_driver_register(&msm_rpm_device_driver);
1393}
1394EXPORT_SYMBOL(msm_rpm_driver_init);
1395late_initcall(msm_rpm_driver_init);