blob: 75f4d928fc6a59c23e27d17c78eeb8f350a080b5 [file] [log] [blame]
Mahesh Sivasubramaniana8ff9922012-03-27 17:50:42 -06001/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/spinlock.h>
27#include <linux/device.h>
28#include <linux/notifier.h>
29#include <linux/slab.h>
30#include <linux/workqueue.h>
31#include <linux/platform_device.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <mach/socinfo.h>
35#include <mach/msm_smd.h>
36#include <mach/rpm-smd.h>
37#include "rpm-notifier.h"
38
/* Per-channel state for the SMD link to the RPM processor. */
struct msm_rpm_driver_data {
	const char *ch_name;		/* DT "rpm-channel-name" */
	uint32_t ch_type;		/* DT "rpm-channel-type" (SMD edge) */
	smd_channel_t *ch_info;		/* handle from smd_named_open_on_edge();
					 * NULL in standalone mode */
	struct work_struct work;	/* bottom half for SMD_EVENT_DATA */
	spinlock_t smd_lock_write;	/* serializes channel writers */
	spinlock_t smd_lock_read;	/* serializes channel readers */
	struct completion smd_open;	/* signalled on SMD_EVENT_OPEN */
};
48
#define DEFAULT_BUFFER_SIZE 256	/* initial per-request scratch buffer */
/* Choose allocation mode from the calling context (noirq => atomic). */
#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
/* Error payload text the RPM returns for an unknown resource. */
#define INV_HDR "resource does not exist"
#define ERR "err\0"		/* 4-byte service tag compared on error acks */
#define MAX_ERR_BUFFER_SIZE 60
54
/* Chain notified on every sleep-set KVP write (see msm_rpm_notify_sleep_chain). */
static struct atomic_notifier_head msm_rpm_sleep_notifier;
/* True when the RPM channel could not be opened; requests are acked locally. */
static bool standalone;
57
58int msm_rpm_register_notifier(struct notifier_block *nb)
59{
60 return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
61}
62
63int msm_rpm_unregister_notifier(struct notifier_block *nb)
64{
65 return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
66}
67
/* Single-threaded workqueue that drains incoming acks (created in probe). */
static struct workqueue_struct *msm_rpm_smd_wq;

enum {
	MSM_RPM_MSG_REQUEST_TYPE = 0,
	MSM_RPM_MSG_TYPE_NR,
};

/* On-the-wire service tags, indexed by message type above. */
static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
	0x716572, /* 'req\0' */
};
78
/* The order of fields matters and reflects the order expected by the RPM. */
struct rpm_request_header {
	uint32_t service_type;	/* one of msm_rpm_request_service[] */
	uint32_t request_len;	/* bytes following this header */
};
84
/* Message header sent immediately after rpm_request_header on the wire. */
struct rpm_message_header {
	uint32_t msg_id;		/* unique id matched against acks */
	enum msm_rpm_set set;		/* active vs sleep set */
	uint32_t resource_type;
	uint32_t resource_id;
	uint32_t data_len;		/* total KVP payload bytes that follow */
};
92
/* One cached key/value pair inside a request. */
struct msm_rpm_kvp_data {
	uint32_t key;
	uint32_t nbytes;	/* number of bytes, padded to 4-byte multiple */
	uint8_t *value;		/* heap buffer of nbytes, owned by the request */
	bool valid;		/* true if modified since the last send */
};
99
/* Monotonically increasing message-id source; 0 is reserved as invalid. */
static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);

static struct msm_rpm_driver_data msm_rpm_data;
103
/* Client-visible request handle; opaque outside this file. */
struct msm_rpm_request {
	struct rpm_request_header req_hdr;
	struct rpm_message_header msg_hdr;	/* must follow req_hdr: both are
						 * memcpy'd together on send */
	struct msm_rpm_kvp_data *kvp;	/* array of num_elements slots */
	uint32_t num_elements;
	uint32_t write_idx;		/* number of slots in use */
	uint8_t *buf;			/* wire-format scratch buffer */
	uint32_t numbytes;		/* capacity of buf */
};
113
/*
 * Data related to message acknowledgement
 */

/* Outstanding requests awaiting an ack; guarded by msm_rpm_list_lock. */
LIST_HEAD(msm_rpm_wait_list);

struct msm_rpm_wait_data {
	struct list_head list;
	uint32_t msg_id;	/* id this waiter is blocked on */
	bool ack_recd;		/* set by msm_rpm_process_ack() */
	int errno;		/* error decoded from the ack payload */
	struct completion ack;
};
DEFINE_SPINLOCK(msm_rpm_list_lock);
128
/* Fixed header of an ack packet as received from the RPM. */
struct msm_rpm_ack_msg {
	uint32_t req;
	uint32_t req_len;
	uint32_t rsc_id;
	uint32_t msg_len;
	uint32_t id_ack;	/* msg_id being acknowledged */
};

/* Nonzero while wait_for_ack_noirq() is polling the channel directly,
 * telling the workqueue handler to stay out of the way. */
static int irq_process;

/* NOTE(review): declared but not referenced in this file — confirm users. */
LIST_HEAD(msm_rpm_ack_list);
140
141static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
142 struct msm_rpm_kvp_data *kvp)
143{
144 struct msm_rpm_notifier_data notif;
145
146 notif.rsc_type = hdr->resource_type;
147 notif.rsc_id = hdr->resource_id;
148 notif.key = kvp->key;
149 notif.size = kvp->nbytes;
150 notif.value = kvp->value;
151 atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
152}
153
154static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
155 uint32_t key, const uint8_t *data, int size, bool noirq)
156{
157 int i;
158 int data_size, msg_size;
159
160 if (!handle)
161 return -EINVAL;
162
163 data_size = ALIGN(size, SZ_4);
164 msg_size = data_size + sizeof(struct rpm_request_header);
165
166 for (i = 0; i < handle->write_idx; i++) {
167 if (handle->kvp[i].key != key)
168 continue;
169 if (handle->kvp[i].nbytes != data_size) {
170 kfree(handle->kvp[i].value);
171 handle->kvp[i].value = NULL;
172 } else {
173 if (!memcmp(handle->kvp[i].value, data, data_size))
174 return 0;
175 }
176 break;
177 }
178
179 if (i >= handle->num_elements)
180 return -ENOMEM;
181
182 if (i == handle->write_idx)
183 handle->write_idx++;
184
185 if (!handle->kvp[i].value) {
186 handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
187
188 if (!handle->kvp[i].value)
189 return -ENOMEM;
190 } else {
191 /* We enter the else case, if a key already exists but the
192 * data doesn't match. In which case, we should zero the data
193 * out.
194 */
195 memset(handle->kvp[i].value, 0, data_size);
196 }
197
198 if (!handle->kvp[i].valid)
199 handle->msg_hdr.data_len += msg_size;
200 else
201 handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
202
203 handle->kvp[i].nbytes = data_size;
204 handle->kvp[i].key = key;
205 memcpy(handle->kvp[i].value, data, size);
206 handle->kvp[i].valid = true;
207
208 if (handle->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
209 msm_rpm_notify_sleep_chain(&handle->msg_hdr, &handle->kvp[i]);
210
211 return 0;
212
213}
214
215static struct msm_rpm_request *msm_rpm_create_request_common(
216 enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
217 int num_elements, bool noirq)
218{
219 struct msm_rpm_request *cdata;
220
221 cdata = kzalloc(sizeof(struct msm_rpm_request),
222 GFP_FLAG(noirq));
223
224 if (!cdata) {
225 printk(KERN_INFO"%s():Cannot allocate memory for client data\n",
226 __func__);
227 goto cdata_alloc_fail;
228 }
229
230 cdata->msg_hdr.set = set;
231 cdata->msg_hdr.resource_type = rsc_type;
232 cdata->msg_hdr.resource_id = rsc_id;
233 cdata->msg_hdr.data_len = 0;
234
235 cdata->num_elements = num_elements;
236 cdata->write_idx = 0;
237
238 cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
239 GFP_FLAG(noirq));
240
241 if (!cdata->kvp) {
242 pr_warn("%s(): Cannot allocate memory for key value data\n",
243 __func__);
244 goto kvp_alloc_fail;
245 }
246
247 cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
248
249 if (!cdata->buf)
250 goto buf_alloc_fail;
251
252 cdata->numbytes = DEFAULT_BUFFER_SIZE;
253 return cdata;
254
255buf_alloc_fail:
256 kfree(cdata->kvp);
257kvp_alloc_fail:
258 kfree(cdata);
259cdata_alloc_fail:
260 return NULL;
261
262}
263
264void msm_rpm_free_request(struct msm_rpm_request *handle)
265{
266 int i;
267
268 if (!handle)
269 return;
270 for (i = 0; i < handle->write_idx; i++)
271 kfree(handle->kvp[i].value);
272 kfree(handle->kvp);
273 kfree(handle);
274}
275EXPORT_SYMBOL(msm_rpm_free_request);
276
277struct msm_rpm_request *msm_rpm_create_request(
278 enum msm_rpm_set set, uint32_t rsc_type,
279 uint32_t rsc_id, int num_elements)
280{
281 return msm_rpm_create_request_common(set, rsc_type, rsc_id,
282 num_elements, false);
283}
284EXPORT_SYMBOL(msm_rpm_create_request);
285
286struct msm_rpm_request *msm_rpm_create_request_noirq(
287 enum msm_rpm_set set, uint32_t rsc_type,
288 uint32_t rsc_id, int num_elements)
289{
290 return msm_rpm_create_request_common(set, rsc_type, rsc_id,
291 num_elements, true);
292}
293EXPORT_SYMBOL(msm_rpm_create_request_noirq);
294
295int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
296 uint32_t key, const uint8_t *data, int size)
297{
298 return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
299
300}
301EXPORT_SYMBOL(msm_rpm_add_kvp_data);
302
303int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
304 uint32_t key, const uint8_t *data, int size)
305{
306 return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
307}
308EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
309
310/* Runs in interrupt context */
311static void msm_rpm_notify(void *data, unsigned event)
312{
313 struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
314 BUG_ON(!pdata);
315
316 if (!(pdata->ch_info))
317 return;
318
319 switch (event) {
320 case SMD_EVENT_DATA:
321 queue_work(msm_rpm_smd_wq, &pdata->work);
322 break;
323 case SMD_EVENT_OPEN:
324 complete(&pdata->smd_open);
325 break;
326 case SMD_EVENT_CLOSE:
327 case SMD_EVENT_STATUS:
328 case SMD_EVENT_REOPEN_READY:
329 break;
330 default:
331 pr_info("Unknown SMD event\n");
332
333 }
334}
335
336static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
337{
338 struct list_head *ptr;
339 struct msm_rpm_wait_data *elem;
340 unsigned long flags;
341
342 spin_lock_irqsave(&msm_rpm_list_lock, flags);
343
344 list_for_each(ptr, &msm_rpm_wait_list) {
345 elem = list_entry(ptr, struct msm_rpm_wait_data, list);
346 if (elem && (elem->msg_id == msg_id))
347 break;
348 elem = NULL;
349 }
350 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
351 return elem;
352}
353
354static int msm_rpm_get_next_msg_id(void)
355{
356 int id;
357
358 do {
359 id = atomic_inc_return(&msm_rpm_msg_id);
360 } while ((id == 0) || msm_rpm_get_entry_from_msg_id(id));
361
362 return id;
363}
364
365static int msm_rpm_add_wait_list(uint32_t msg_id)
366{
367 unsigned long flags;
368 struct msm_rpm_wait_data *data =
369 kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
370
371 if (!data)
372 return -ENOMEM;
373
374 init_completion(&data->ack);
375 data->ack_recd = false;
376 data->msg_id = msg_id;
377 spin_lock_irqsave(&msm_rpm_list_lock, flags);
378 list_add(&data->list, &msm_rpm_wait_list);
379 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
380
381 return 0;
382}
383
384static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
385{
386 unsigned long flags;
387
388 spin_lock_irqsave(&msm_rpm_list_lock, flags);
389 list_del(&elem->list);
390 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
391 kfree(elem);
392}
393
394static void msm_rpm_process_ack(uint32_t msg_id, int errno)
395{
396 struct list_head *ptr;
397 struct msm_rpm_wait_data *elem;
398 unsigned long flags;
399
400 spin_lock_irqsave(&msm_rpm_list_lock, flags);
401
402 list_for_each(ptr, &msm_rpm_wait_list) {
403 elem = list_entry(ptr, struct msm_rpm_wait_data, list);
404 if (elem && (elem->msg_id == msg_id)) {
405 elem->errno = errno;
406 elem->ack_recd = true;
407 complete(&elem->ack);
408 break;
409 }
410 elem = NULL;
411 }
412 WARN_ON(!elem);
413
414 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
415}
416
/* Wire layout of a single KVP (key, length, first value word).
 * NOTE(review): not referenced anywhere in this file — confirm it is
 * used elsewhere before removing. */
struct msm_rpm_kvp_packet {
	uint32_t id;
	uint32_t len;
	uint32_t val;
};
422
423static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
424{
425 return ((struct msm_rpm_ack_msg *)buf)->id_ack;
426}
427
428static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
429{
430 uint8_t *tmp;
431 uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
432
433 int rc = -ENODEV;
434
435 req_len -= sizeof(struct msm_rpm_ack_msg);
436 req_len += 2 * sizeof(uint32_t);
437 if (!req_len)
438 return 0;
439
440 tmp = buf + sizeof(struct msm_rpm_ack_msg);
441
442 BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
443
444 tmp += 2 * sizeof(uint32_t);
445
446 if (!(memcmp(tmp, INV_HDR, min(req_len, sizeof(INV_HDR))-1)))
447 rc = -EINVAL;
448
449 return rc;
450}
451
452static void msm_rpm_read_smd_data(char *buf)
453{
454 int pkt_sz;
455 int bytes_read = 0;
456
457 pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
458
459 BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
460
461 if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
462 return;
463
464 BUG_ON(pkt_sz == 0);
465
466 do {
467 int len;
468
469 len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
470 pkt_sz -= len;
471 bytes_read += len;
472
473 } while (pkt_sz > 0);
474
475 BUG_ON(pkt_sz < 0);
476}
477
/*
 * Work handler for SMD_EVENT_DATA: drains each complete packet from the
 * RPM channel and completes the matching waiter.  Skipped while
 * irq_process is set, i.e. while msm_rpm_wait_for_ack_noirq() is polling
 * the channel itself.
 */
static void msm_rpm_smd_work(struct work_struct *work)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};
	unsigned long flags;

	while (smd_is_pkt_avail(msm_rpm_data.ch_info) && !irq_process) {
		spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
		msm_rpm_read_smd_data(buf);
		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
		/*
		 * NOTE(review): msm_rpm_read_smd_data() returns without
		 * touching buf when the packet is not fully in the FIFO;
		 * in that case the stale previous ack left in buf would be
		 * re-processed here — confirm whether that can occur once
		 * smd_is_pkt_avail() has reported a packet.
		 */
		msg_id = msm_rpm_get_msg_id_from_ack(buf);
		errno = msm_rpm_get_error_from_ack(buf);
		msm_rpm_process_ack(msg_id, errno);
	}
}
494
495static int msm_rpm_send_data(struct msm_rpm_request *cdata,
496 int msg_type, bool noirq)
497{
498 uint8_t *tmpbuff;
499 int i, ret, msg_size;
500 unsigned long flags;
501
502 int req_hdr_sz, msg_hdr_sz;
503
504 if (!cdata->msg_hdr.data_len)
505 return 0;
506 req_hdr_sz = sizeof(cdata->req_hdr);
507 msg_hdr_sz = sizeof(cdata->msg_hdr);
508
509 cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
510
511 cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
512
513 cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
514 msg_size = cdata->req_hdr.request_len + req_hdr_sz;
515
516 /* populate data_len */
517 if (msg_size > cdata->numbytes) {
518 kfree(cdata->buf);
519 cdata->numbytes = msg_size;
520 cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
521 }
522
523 if (!cdata->buf)
524 return 0;
525
526 tmpbuff = cdata->buf;
527
528 memcpy(tmpbuff, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
529
530 tmpbuff += req_hdr_sz + msg_hdr_sz;
531
532 for (i = 0; (i < cdata->write_idx); i++) {
533 /* Sanity check */
534 BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
535
536 if (!cdata->kvp[i].valid)
537 continue;
538
539 memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
540 tmpbuff += sizeof(uint32_t);
541
542 memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
543 tmpbuff += sizeof(uint32_t);
544
545 memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
546 tmpbuff += cdata->kvp[i].nbytes;
547 }
548
549 if (standalone) {
550 for (i = 0; (i < cdata->write_idx); i++)
551 cdata->kvp[i].valid = false;
552
553 cdata->msg_hdr.data_len = 0;
554 ret = cdata->msg_hdr.msg_id;
555 return ret;
556 }
557
558 msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
559
560 spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
561
562 ret = smd_write_avail(msm_rpm_data.ch_info);
563
564 if (ret < 0) {
565 pr_warn("%s(): SMD not initialized\n", __func__);
566 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
567 return 0;
568 }
569
570 while ((ret < msg_size)) {
571 if (!noirq) {
572 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
573 flags);
574 cpu_relax();
575 spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
576 } else
577 udelay(5);
578 ret = smd_write_avail(msm_rpm_data.ch_info);
579 }
580
581 ret = smd_write(msm_rpm_data.ch_info, &cdata->buf[0], msg_size);
582 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
583
584 if (ret == msg_size) {
585 for (i = 0; (i < cdata->write_idx); i++)
586 cdata->kvp[i].valid = false;
587 cdata->msg_hdr.data_len = 0;
588 ret = cdata->msg_hdr.msg_id;
589 } else if (ret < msg_size) {
590 struct msm_rpm_wait_data *rc;
591 ret = 0;
592 pr_info("Failed to write data msg_size:%d ret:%d\n",
593 msg_size, ret);
594 rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
595 if (rc)
596 msm_rpm_free_list_entry(rc);
597 }
598 return ret;
599}
600
601int msm_rpm_send_request(struct msm_rpm_request *handle)
602{
603 return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
604}
605EXPORT_SYMBOL(msm_rpm_send_request);
606
607int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
608{
609 return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
610}
611EXPORT_SYMBOL(msm_rpm_send_request_noirq);
612
613int msm_rpm_wait_for_ack(uint32_t msg_id)
614{
615 struct msm_rpm_wait_data *elem;
616 int rc = 0;
617
618 if (!msg_id)
619 return -EINVAL;
620
621 if (standalone)
622 return 0;
623
624 elem = msm_rpm_get_entry_from_msg_id(msg_id);
625 if (!elem)
626 return 0;
627
628 rc = wait_for_completion_timeout(&elem->ack, msecs_to_jiffies(1));
629 if (!rc) {
630 pr_warn("%s(): Timed out after 1 ms\n", __func__);
631 rc = -ETIMEDOUT;
632 } else {
633 rc = elem->errno;
634 msm_rpm_free_list_entry(elem);
635 }
636 return rc;
637}
638EXPORT_SYMBOL(msm_rpm_wait_for_ack);
639
640int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
641{
642 struct msm_rpm_wait_data *elem;
643 unsigned long flags;
644 int rc = 0;
645 uint32_t id = 0;
646 int count = 0;
647
648 if (!msg_id)
649 return -EINVAL;
650
651 if (standalone)
652 return 0;
653
654 spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
655 irq_process = true;
656
657 elem = msm_rpm_get_entry_from_msg_id(msg_id);
658
659 if (!elem)
660 /* Should this be a bug
661 * Is it ok for another thread to read the msg?
662 */
663 goto wait_ack_cleanup;
664
665 while ((id != msg_id) && (count++ < 10)) {
666 if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
667 int errno;
668 char buf[MAX_ERR_BUFFER_SIZE] = {};
669
670 msm_rpm_read_smd_data(buf);
671 id = msm_rpm_get_msg_id_from_ack(buf);
672 errno = msm_rpm_get_error_from_ack(buf);
673 msm_rpm_process_ack(id, errno);
674 } else
675 udelay(100);
676 }
677
678 if (count == 10) {
679 rc = -ETIMEDOUT;
680 pr_warn("%s(): Timed out after 1ms\n", __func__);
681 } else {
682 rc = elem->errno;
683 msm_rpm_free_list_entry(elem);
684 }
685wait_ack_cleanup:
686 irq_process = false;
687 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
688 return rc;
689}
690EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
691
692int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
693 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
694{
695 int i, rc;
696 struct msm_rpm_request *req =
697 msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
698 if (!req)
699 return -ENOMEM;
700
701 for (i = 0; i < nelems; i++) {
702 rc = msm_rpm_add_kvp_data(req, kvp[i].key,
703 kvp[i].data, kvp[i].length);
704 if (rc)
705 goto bail;
706 }
707
708 rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
709bail:
710 msm_rpm_free_request(req);
711 return rc;
712}
713EXPORT_SYMBOL(msm_rpm_send_message);
714
715int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
716 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
717{
718 int i, rc;
719 struct msm_rpm_request *req =
720 msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
721 if (!req)
722 return -ENOMEM;
723
724 for (i = 0; i < nelems; i++) {
725 rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
726 kvp[i].data, kvp[i].length);
727 if (rc)
728 goto bail;
729 }
730
731 rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
732bail:
733 msm_rpm_free_request(req);
734 return rc;
735}
736EXPORT_SYMBOL(msm_rpm_send_message_noirq);
737static bool msm_rpm_set_standalone(void)
738{
739 if (machine_is_copper()) {
740 pr_warn("%s(): Running in standalone mode, requests "
741 "will not be sent to RPM\n", __func__);
742 standalone = true;
743 }
744 return standalone;
745}
746
747static int __devinit msm_rpm_dev_probe(struct platform_device *pdev)
748{
749 char *key = NULL;
750 int ret;
751
752 key = "rpm-channel-name";
753 ret = of_property_read_string(pdev->dev.of_node, key,
754 &msm_rpm_data.ch_name);
755 if (ret)
756 goto fail;
757
758 key = "rpm-channel-type";
759 ret = of_property_read_u32(pdev->dev.of_node, key,
760 &msm_rpm_data.ch_type);
761 if (ret)
762 goto fail;
763
764 init_completion(&msm_rpm_data.smd_open);
765 spin_lock_init(&msm_rpm_data.smd_lock_write);
766 spin_lock_init(&msm_rpm_data.smd_lock_read);
767 INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
768
769 if (smd_named_open_on_edge(msm_rpm_data.ch_name, msm_rpm_data.ch_type,
770 &msm_rpm_data.ch_info, &msm_rpm_data,
771 msm_rpm_notify)) {
772 pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name,
773 msm_rpm_data.ch_type);
774
775 msm_rpm_set_standalone();
776 BUG_ON(!standalone);
777 complete(&msm_rpm_data.smd_open);
778 }
779
780 ret = wait_for_completion_timeout(&msm_rpm_data.smd_open,
781 msecs_to_jiffies(5));
782
783 BUG_ON(!ret);
784
785 smd_disable_read_intr(msm_rpm_data.ch_info);
786
787 if (!standalone) {
788 msm_rpm_smd_wq = create_singlethread_workqueue("rpm-smd");
789 if (!msm_rpm_smd_wq)
790 return -EINVAL;
791 }
792
793 of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
794 return 0;
795fail:
796 pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
797 pdev->dev.of_node->full_name, key);
798 return -EINVAL;
799}
800
/* Device-tree match table: binds to "qcom,rpm-smd" nodes. */
static struct of_device_id msm_rpm_match_table[] = {
	{.compatible = "qcom,rpm-smd"},
	{},
};
805
/* Platform driver; no remove handler — the RPM link lives forever. */
static struct platform_driver msm_rpm_device_driver = {
	.probe = msm_rpm_dev_probe,
	.driver = {
		.name = "rpm-smd",
		.owner = THIS_MODULE,
		.of_match_table = msm_rpm_match_table,
	},
};
814
815int __init msm_rpm_driver_init(void)
816{
817 static bool registered;
818
819 if (registered)
820 return 0;
821 registered = true;
822
823 return platform_driver_register(&msm_rpm_device_driver);
824}
825EXPORT_SYMBOL(msm_rpm_driver_init);
826late_initcall(msm_rpm_driver_init);