blob: c29cfcbe22e7907380e90b825218abb882c3024b [file] [log] [blame]
Raju P.L.S.S.S.N43c1be72017-10-31 16:50:30 +05301/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/bug.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/irq.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
28#include <linux/of_address.h>
29#include <linux/spinlock.h>
30#include <linux/string.h>
31#include <linux/device.h>
32#include <linux/notifier.h>
33#include <linux/slab.h>
Raju P.L.S.S.S.N43c1be72017-10-31 16:50:30 +053034#include <linux/platform_device.h>
35#include <linux/of.h>
36#include <linux/of_platform.h>
37#include <linux/rbtree.h>
38#include <soc/qcom/rpm-notifier.h>
39#include <soc/qcom/rpm-smd.h>
40#include <soc/qcom/smd.h>
41#include <soc/qcom/glink_rpm_xprt.h>
42#include <soc/qcom/glink.h>
43
44#define CREATE_TRACE_POINTS
45#include <trace/events/trace_rpm_smd.h>
46
47/* Debug Definitions */
/* Debug Definitions */
enum {
	MSM_RPM_LOG_REQUEST_PRETTY	= BIT(0),
	MSM_RPM_LOG_REQUEST_RAW		= BIT(1),
	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID	= BIT(2),
};

/* Bitmask of MSM_RPM_LOG_REQUEST_* flags above; tunable at runtime. */
static int msm_rpm_debug_mask;
module_param_named(
	debug_mask, msm_rpm_debug_mask, int, 0644
);

/* Per-channel state for the SMD transport to the RPM processor. */
struct msm_rpm_driver_data {
	const char *ch_name;		/* SMD channel name */
	uint32_t ch_type;		/* SMD edge/channel type */
	smd_channel_t *ch_info;		/* handle from the SMD open */
	struct work_struct work;
	spinlock_t smd_lock_write;	/* serializes channel writers */
	spinlock_t smd_lock_read;	/* serializes channel readers */
	struct completion smd_open;	/* completed on SMD_EVENT_OPEN */
};

/* State for the G-Link transport (alternative to SMD). */
struct glink_apps_rpm_data {
	const char *name;
	const char *edge;
	const char *xprt;
	void *glink_handle;
	struct glink_link_info *link_info;
	struct glink_open_config *open_cfg;
	struct work_struct work;
};

/* True when the G-Link transport is used instead of SMD. */
static bool glink_enabled;
static struct glink_apps_rpm_data *glink_data;
81
#define DEFAULT_BUFFER_SIZE 256
#define DEBUG_PRINT_BUFFER_SIZE 512
#define MAX_SLEEP_BUFFER 128
/* Atomic allocations on paths where the caller cannot sleep. */
#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_NOIO)
#define INV_RSC "resource does not exist"
#define ERR "err\0"
#define MAX_ERR_BUFFER_SIZE 128
/* Drain acks after this many queued sleep-set sends (bounds the SMD FIFO). */
#define MAX_WAIT_ON_ACK 24
#define INIT_ERROR 1
#define V1_PROTOCOL_VERSION 0x31726576 /* rev1 */
#define V0_PROTOCOL_VERSION 0 /* rev0 */
/*
 * Bit-field offsets/sizes of the packed v1 request and ack headers;
 * see struct rpm_v1_hdr / struct rpm_message_header_v1 below.
 */
#define RPM_MSG_TYPE_OFFSET 16
#define RPM_MSG_TYPE_SIZE 8
#define RPM_SET_TYPE_OFFSET 28
#define RPM_SET_TYPE_SIZE 4
#define RPM_REQ_LEN_OFFSET 0
#define RPM_REQ_LEN_SIZE 16
#define RPM_MSG_VERSION_OFFSET 24
#define RPM_MSG_VERSION_SIZE 8
#define RPM_MSG_VERSION 1
#define RPM_MSG_SET_OFFSET 28
#define RPM_MSG_SET_SIZE 4
#define RPM_RSC_ID_OFFSET 16
#define RPM_RSC_ID_SIZE 12
#define RPM_DATA_LEN_OFFSET 0
#define RPM_DATA_LEN_SIZE 16
/* Header size depends on the negotiated wire format. */
#define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\
		sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr))
/* Mask that clears the (offset, size) bit-field of a word. */
#define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset))

/* Chain notified (atomic context) as sleep-set entries are recorded. */
static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
static bool standalone;
/* -EPROBE_DEFER until the transport probes; 0 once usable. */
static int probe_status = -EPROBE_DEFER;
static int msm_rpm_read_smd_data(char *buf);
static void msm_rpm_process_ack(uint32_t msg_id, int errno);
117
/* Register @nb on the sleep-set notifier chain (atomic-safe). */
int msm_rpm_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
}

/* Remove @nb from the sleep-set notifier chain. */
int msm_rpm_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
}
127
/* Message types understood by this driver (v0 wire format). */
enum {
	MSM_RPM_MSG_REQUEST_TYPE = 0,
	MSM_RPM_MSG_TYPE_NR,
};

/* v0 service identifiers ('req' packed as little-endian ASCII). */
static const uint32_t msm_rpm_request_service_v1[MSM_RPM_MSG_TYPE_NR] = {
	0x716572, /* 'req\0' */
};

/* v1 service identifiers carried in the packed request header. */
enum {
	RPM_V1_REQUEST_SERVICE,
	RPM_V1_SYSTEMDB_SERVICE,
	RPM_V1_COMMAND_SERVICE,
	RPM_V1_ACK_SERVICE,
	RPM_V1_NACK_SERVICE,
} msm_rpm_request_service_v2;

/* v0 header: explicit service-type and request-length words. */
struct rpm_v0_hdr {
	uint32_t service_type;
	uint32_t request_len;
};

/* v1 header: type, length and version bit-packed into one word. */
struct rpm_v1_hdr {
	uint32_t request_hdr;
};

struct rpm_message_header_v0 {
	struct rpm_v0_hdr hdr;
	uint32_t msg_id;
	enum msm_rpm_set set;
	uint32_t resource_type;
	uint32_t resource_id;
	uint32_t data_len;
};

struct rpm_message_header_v1 {
	struct rpm_v1_hdr hdr;
	uint32_t msg_id;
	uint32_t resource_type;
	/* set / resource id / data length, packed per RPM_*_OFFSET/SIZE */
	uint32_t request_details;
};

struct msm_rpm_ack_msg_v0 {
	uint32_t req;
	uint32_t req_len;
	uint32_t rsc_id;
	uint32_t msg_len;
	uint32_t id_ack;
};

struct msm_rpm_ack_msg_v1 {
	uint32_t request_hdr;
	uint32_t id_ack;
};

/* On-wire key/value pair header; s bytes of payload follow it. */
struct kvp {
	unsigned int k;
	unsigned int s;
};

/* Driver-side copy of one key/value element of a request. */
struct msm_rpm_kvp_data {
	uint32_t key;
	uint32_t nbytes; /* number of bytes */
	uint8_t *value;
	bool valid;
};

/* Cached sleep-set request, keyed by (resource type, id) in tr_root. */
struct slp_buf {
	struct rb_node node;
	char ubuf[MAX_SLEEP_BUFFER];
	char *buf;	/* u32-aligned pointer into ubuf */
	bool valid;	/* true while not yet flushed to the RPM */
};

/* Wire-format revisions negotiated with the RPM firmware. */
enum rpm_msg_fmts {
	RPM_MSG_V0_FMT,
	RPM_MSG_V1_FMT
};

static uint32_t rpm_msg_fmt_ver;
module_param_named(
	rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, 0444
);

/* Sleep-set cache root and the transport send hook picked at probe. */
static struct rb_root tr_root = RB_ROOT;
static int (*msm_rpm_send_buffer)(char *buf, uint32_t size, bool noirq);
static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq);
static uint32_t msm_rpm_get_next_msg_id(void);
217
/* Extract the @size-bit field that starts at bit @offset of @val. */
static inline uint32_t get_offset_value(uint32_t val, uint32_t offset,
				uint32_t size)
{
	uint32_t field_mask = GENMASK(offset + size - 1, offset);

	return (val & field_mask) >> offset;
}

/* Add @val1 (possibly negative) to the @size-bit field at @offset in *@val. */
static inline void change_offset_value(uint32_t *val, uint32_t offset,
				uint32_t size, int32_t val1)
{
	uint32_t mask = (1 << size) - 1;
	uint32_t field = get_offset_value(*val, offset, size) + val1;

	*val = (*val & CLEAR_FIELD(offset, size)) | ((field & mask) << offset);
}

/* Overwrite the @size-bit field at @offset in *@val with @val1. */
static inline void set_offset_value(uint32_t *val, uint32_t offset,
				uint32_t size, uint32_t val1)
{
	uint32_t mask = (1 << size) - 1;

	*val = (*val & CLEAR_FIELD(offset, size)) | ((val1 & mask) << offset);
}
245static uint32_t get_msg_id(char *buf)
246{
247 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
248 return ((struct rpm_message_header_v0 *)buf)->msg_id;
249
250 return ((struct rpm_message_header_v1 *)buf)->msg_id;
251
252}
253
254static uint32_t get_ack_msg_id(char *buf)
255{
256 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
257 return ((struct msm_rpm_ack_msg_v0 *)buf)->id_ack;
258
259 return ((struct msm_rpm_ack_msg_v1 *)buf)->id_ack;
260
261}
262
263static uint32_t get_rsc_type(char *buf)
264{
265 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
266 return ((struct rpm_message_header_v0 *)buf)->resource_type;
267
268 return ((struct rpm_message_header_v1 *)buf)->resource_type;
269
270}
271
272static uint32_t get_set_type(char *buf)
273{
274 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
275 return ((struct rpm_message_header_v0 *)buf)->set;
276
277 return get_offset_value(((struct rpm_message_header_v1 *)buf)->
278 request_details, RPM_SET_TYPE_OFFSET,
279 RPM_SET_TYPE_SIZE);
280}
281
282static uint32_t get_data_len(char *buf)
283{
284 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
285 return ((struct rpm_message_header_v0 *)buf)->data_len;
286
287 return get_offset_value(((struct rpm_message_header_v1 *)buf)->
288 request_details, RPM_DATA_LEN_OFFSET,
289 RPM_DATA_LEN_SIZE);
290}
291
292static uint32_t get_rsc_id(char *buf)
293{
294 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
295 return ((struct rpm_message_header_v0 *)buf)->resource_id;
296
297 return get_offset_value(((struct rpm_message_header_v1 *)buf)->
298 request_details, RPM_RSC_ID_OFFSET,
299 RPM_RSC_ID_SIZE);
300}
301
302static uint32_t get_ack_req_len(char *buf)
303{
304 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
305 return ((struct msm_rpm_ack_msg_v0 *)buf)->req_len;
306
307 return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
308 request_hdr, RPM_REQ_LEN_OFFSET,
309 RPM_REQ_LEN_SIZE);
310}
311
312static uint32_t get_ack_msg_type(char *buf)
313{
314 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
315 return ((struct msm_rpm_ack_msg_v0 *)buf)->req;
316
317 return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
318 request_hdr, RPM_MSG_TYPE_OFFSET,
319 RPM_MSG_TYPE_SIZE);
320}
321
322static uint32_t get_req_len(char *buf)
323{
324 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
325 return ((struct rpm_message_header_v0 *)buf)->hdr.request_len;
326
327 return get_offset_value(((struct rpm_message_header_v1 *)buf)->
328 hdr.request_hdr, RPM_REQ_LEN_OFFSET,
329 RPM_REQ_LEN_SIZE);
330}
331
332static void set_msg_ver(char *buf, uint32_t val)
333{
334 if (rpm_msg_fmt_ver) {
335 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
336 hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
337 RPM_MSG_VERSION_SIZE, val);
338 } else {
339 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
340 hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
341 RPM_MSG_VERSION_SIZE, 0);
342 }
343}
344
345static void set_req_len(char *buf, uint32_t val)
346{
347 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
348 ((struct rpm_message_header_v0 *)buf)->hdr.request_len = val;
349 } else {
350 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
351 hdr.request_hdr, RPM_REQ_LEN_OFFSET,
352 RPM_REQ_LEN_SIZE, val);
353 }
354}
355
356static void change_req_len(char *buf, int32_t val)
357{
358 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
359 ((struct rpm_message_header_v0 *)buf)->hdr.request_len += val;
360 } else {
361 change_offset_value(&((struct rpm_message_header_v1 *)buf)->
362 hdr.request_hdr, RPM_REQ_LEN_OFFSET,
363 RPM_REQ_LEN_SIZE, val);
364 }
365}
366
367static void set_msg_type(char *buf, uint32_t val)
368{
369 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
370 ((struct rpm_message_header_v0 *)buf)->hdr.service_type =
371 msm_rpm_request_service_v1[val];
372 } else {
373 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
374 hdr.request_hdr, RPM_MSG_TYPE_OFFSET,
375 RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
376 }
377}
378
379static void set_rsc_id(char *buf, uint32_t val)
380{
381 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
382 ((struct rpm_message_header_v0 *)buf)->resource_id = val;
383 else
384 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
385 request_details, RPM_RSC_ID_OFFSET,
386 RPM_RSC_ID_SIZE, val);
387}
388
389static void set_data_len(char *buf, uint32_t val)
390{
391 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
392 ((struct rpm_message_header_v0 *)buf)->data_len = val;
393 else
394 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
395 request_details, RPM_DATA_LEN_OFFSET,
396 RPM_DATA_LEN_SIZE, val);
397}
398static void change_data_len(char *buf, int32_t val)
399{
400 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
401 ((struct rpm_message_header_v0 *)buf)->data_len += val;
402 else
403 change_offset_value(&((struct rpm_message_header_v1 *)buf)->
404 request_details, RPM_DATA_LEN_OFFSET,
405 RPM_DATA_LEN_SIZE, val);
406}
407
408static void set_set_type(char *buf, uint32_t val)
409{
410 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
411 ((struct rpm_message_header_v0 *)buf)->set = val;
412 else
413 set_offset_value(&((struct rpm_message_header_v1 *)buf)->
414 request_details, RPM_SET_TYPE_OFFSET,
415 RPM_SET_TYPE_SIZE, val);
416}
417static void set_msg_id(char *buf, uint32_t val)
418{
419 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
420 ((struct rpm_message_header_v0 *)buf)->msg_id = val;
421 else
422 ((struct rpm_message_header_v1 *)buf)->msg_id = val;
423
424}
425
426static void set_rsc_type(char *buf, uint32_t val)
427{
428 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
429 ((struct rpm_message_header_v0 *)buf)->resource_type = val;
430 else
431 ((struct rpm_message_header_v1 *)buf)->resource_type = val;
432}
433
434static inline int get_buf_len(char *buf)
435{
436 return get_req_len(buf) + RPM_HDR_SIZE;
437}
438
439static inline struct kvp *get_first_kvp(char *buf)
440{
441 if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
442 return (struct kvp *)(buf +
443 sizeof(struct rpm_message_header_v0));
444 else
445 return (struct kvp *)(buf +
446 sizeof(struct rpm_message_header_v1));
447}
448
449static inline struct kvp *get_next_kvp(struct kvp *k)
450{
451 return (struct kvp *)((void *)k + sizeof(*k) + k->s);
452}
453
454static inline void *get_data(struct kvp *k)
455{
456 return (void *)k + sizeof(*k);
457}
458
459
/*
 * Remove kvp @d from @buf by sliding every later pair down over it,
 * then shrink the header's data and request lengths accordingly.
 */
static void delete_kvp(char *buf, struct kvp *d)
{
	struct kvp *n;
	int dec;
	uint32_t size;

	n = get_next_kvp(d);
	dec = (void *)n - (void *)d;
	size = get_data_len(buf) -
		((void *)n - (void *)get_first_kvp(buf));

	/*
	 * Source and destination overlap whenever more than one pair
	 * follows @d, so memcpy is undefined behavior here; memmove
	 * handles the overlap correctly.
	 */
	memmove((void *)d, (void *)n, size);

	change_data_len(buf, -dec);
	change_req_len(buf, -dec);
}
476
477static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
478{
479 memcpy(get_data(dest), get_data(src), src->s);
480}
481
482static void add_kvp(char *buf, struct kvp *n)
483{
484 int32_t inc = sizeof(*n) + n->s;
485
486 if (get_req_len(buf) + inc > MAX_SLEEP_BUFFER) {
487 WARN_ON(get_req_len(buf) + inc > MAX_SLEEP_BUFFER);
488 return;
489 }
490
491 memcpy(buf + get_buf_len(buf), n, inc);
492
493 change_data_len(buf, inc);
494 change_req_len(buf, inc);
495}
496
497static struct slp_buf *tr_search(struct rb_root *root, char *slp)
498{
499 unsigned int type = get_rsc_type(slp);
500 unsigned int id = get_rsc_id(slp);
501 struct rb_node *node = root->rb_node;
502
503 while (node) {
504 struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
505 unsigned int ctype = get_rsc_type(cur->buf);
506 unsigned int cid = get_rsc_id(cur->buf);
507
508 if (type < ctype)
509 node = node->rb_left;
510 else if (type > ctype)
511 node = node->rb_right;
512 else if (id < cid)
513 node = node->rb_left;
514 else if (id > cid)
515 node = node->rb_right;
516 else
517 return cur;
518 }
519 return NULL;
520}
521
522static int tr_insert(struct rb_root *root, struct slp_buf *slp)
523{
524 unsigned int type = get_rsc_type(slp->buf);
525 unsigned int id = get_rsc_id(slp->buf);
526 struct rb_node **node = &(root->rb_node), *parent = NULL;
527
528 while (*node) {
529 struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
530 unsigned int ctype = get_rsc_type(curr->buf);
531 unsigned int cid = get_rsc_id(curr->buf);
532
533 parent = *node;
534
535 if (type < ctype)
536 node = &((*node)->rb_left);
537 else if (type > ctype)
538 node = &((*node)->rb_right);
539 else if (id < cid)
540 node = &((*node)->rb_left);
541 else if (id > cid)
542 node = &((*node)->rb_right);
543 else
544 return -EINVAL;
545 }
546
547 rb_link_node(&slp->node, parent, node);
548 rb_insert_color(&slp->node, root);
549 slp->valid = true;
550 return 0;
551}
552
/*
 * Iterate @k over every key/value pair in @buf; the kvp area spans
 * get_data_len(buf) bytes starting at the first kvp.
 */
#define for_each_kvp(buf, k) \
	for (k = (struct kvp *)get_first_kvp(buf); \
		((void *)k - (void *)get_first_kvp(buf)) < \
			get_data_len(buf);\
		k = get_next_kvp(k))
558
559
/*
 * Merge the kvps of a fresh request @buf into cached entry @s:
 * same-size keys are updated in place when their bytes differ,
 * size-changed keys are deleted and re-appended, and unknown keys
 * are appended. s->valid is set whenever the cached bytes change so
 * the entry is re-sent by the next flush.
 */
static void tr_update(struct slp_buf *s, char *buf)
{
	struct kvp *e, *n;

	for_each_kvp(buf, n) {
		bool found = false;

		for_each_kvp(s->buf, e) {
			if (n->k == e->k) {
				found = true;
				if (n->s == e->s) {
					void *e_data = get_data(e);
					void *n_data = get_data(n);

					/* Same size: copy only on change. */
					if (memcmp(e_data, n_data, n->s)) {
						update_kvp_data(e, n);
						s->valid = true;
					}
				} else {
					/*
					 * Size changed: replace the pair by
					 * delete + append at the tail. The
					 * inner walk stops right after, so
					 * the compaction done by delete_kvp
					 * does not disturb iteration.
					 */
					delete_kvp(s->buf, e);
					add_kvp(s->buf, n);
					s->valid = true;
				}
				break;
			}

		}
		if (!found) {
			add_kvp(s->buf, n);
			s->valid = true;
		}
	}
}
/* Monotonic source of message ids (0 and 1 are reserved values). */
static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);

/* Client handle returned by msm_rpm_create_request{,_noirq}(). */
struct msm_rpm_request {
	uint8_t *client_buf;		/* message header under construction */
	struct msm_rpm_kvp_data *kvp;	/* per-key staging array */
	uint32_t num_elements;		/* capacity of kvp[] */
	uint32_t write_idx;		/* kvp[] slots currently in use */
	uint8_t *buf;			/* scratch wire buffer */
	uint32_t numbytes;		/* size of buf */
};

/*
 * Data related to message acknowledgment
 */

LIST_HEAD(msm_rpm_wait_list);

/* One outstanding request awaiting its ack from the RPM. */
struct msm_rpm_wait_data {
	struct list_head list;
	uint32_t msg_id;
	bool ack_recd;
	int errno;
	struct completion ack;
	bool delete_on_ack;	/* ack handler frees the entry itself */
};
DEFINE_SPINLOCK(msm_rpm_list_lock);



LIST_HEAD(msm_rpm_ack_list);

/* Scheduled from the SMD notify callback to drain incoming acks. */
static struct tasklet_struct data_tasklet;
625
/* Message id echoed back in an ack packet. */
static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
{
	return get_ack_msg_id(buf);
}

/*
 * Decode the error (if any) carried by an ack packet.
 *
 * Returns 0 for a plain ack, the RPM-supplied error word for a v1
 * NACK, -EINVAL when the RPM rejected an unknown resource, and
 * -ENODEV for any other NACK payload.
 */
static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
{
	uint8_t *tmp;
	uint32_t req_len = get_ack_req_len(buf);
	uint32_t msg_type = get_ack_msg_type(buf);
	int rc = -ENODEV;
	uint32_t err;
	uint32_t ack_msg_size = rpm_msg_fmt_ver ?
			sizeof(struct msm_rpm_ack_msg_v1) :
			sizeof(struct msm_rpm_ack_msg_v0);

	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT &&
			msg_type == RPM_V1_ACK_SERVICE) {
		return 0;
	} else if (rpm_msg_fmt_ver && msg_type == RPM_V1_NACK_SERVICE) {
		/* v1 NACK: error word follows the packed ack header. */
		err = *(uint32_t *)(buf + sizeof(struct msm_rpm_ack_msg_v1));
		return err;
	}

	/*
	 * Size of the optional error payload; zero here means a plain
	 * ack with nothing after the header.
	 */
	req_len -= ack_msg_size;
	req_len += 2 * sizeof(uint32_t);
	if (!req_len)
		return 0;

	pr_err("%s:rpm returned error or nack req_len: %d id_ack: %d\n",
			__func__, req_len, get_ack_msg_id(buf));

	tmp = buf + ack_msg_size;

	/* The error payload is expected to begin with the "err" marker. */
	if (memcmp(tmp, ERR, sizeof(uint32_t))) {
		pr_err("%s rpm returned error\n", __func__);
		WARN_ON(1);
	}

	tmp += 2 * sizeof(uint32_t);

	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
					sizeof(INV_RSC))-1))) {
		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
		rc = -EINVAL;
	} else {
		pr_err("%s(): RPM NACK Invalid header\n", __func__);
	}

	return rc;
}
677
/*
 * Stage a sleep-set request rather than sending it: the request is
 * merged into the (type, id)-keyed cache rooted at tr_root and only
 * flushed to the RPM by msm_rpm_flush_requests() on the way into
 * power collapse. Returns 0 on success, -ENOMEM if the request is
 * oversized or allocation fails. @flag is unused; GFP_ATOMIC is used
 * because the cache lock is held with interrupts off.
 */
int msm_rpm_smd_buffer_request(struct msm_rpm_request *cdata,
		uint32_t size, gfp_t flag)
{
	struct slp_buf *slp;
	static DEFINE_SPINLOCK(slp_buffer_lock);
	unsigned long flags;
	char *buf;

	buf = cdata->buf;

	if (size > MAX_SLEEP_BUFFER)
		return -ENOMEM;

	spin_lock_irqsave(&slp_buffer_lock, flags);
	slp = tr_search(&tr_root, buf);

	if (!slp) {
		/* First request for this (type, id): cache a copy. */
		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
		if (!slp) {
			spin_unlock_irqrestore(&slp_buffer_lock, flags);
			return -ENOMEM;
		}
		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
		memcpy(slp->buf, buf, size);
		if (tr_insert(&tr_root, slp))
			pr_err("Error updating sleep request\n");
	} else {
		/* handle unsent requests */
		tr_update(slp, buf);
	}
	trace_rpm_smd_sleep_set(get_msg_id(cdata->client_buf),
			get_rsc_type(cdata->client_buf),
			get_req_len(cdata->client_buf));

	spin_unlock_irqrestore(&slp_buffer_lock, flags);

	return 0;
}
716
/* SMD transport state; smd_open completes when the channel opens. */
static struct msm_rpm_driver_data msm_rpm_data = {
	.smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
};
720
/*
 * Poll the G-Link channel once for an incoming packet.
 *
 * Returns the (>= 0) glink_rpm_rx_poll() result when data may become
 * available, or a negative transport error.
 */
static int msm_rpm_glink_rx_poll(void *glink_handle)
{
	int ret;

	ret = glink_rpm_rx_poll(glink_handle);
	if (ret >= 0)
		/*
		 * Sleep for 50us at a time before checking
		 * for packet availability. The 50us is based
		 * on the time rpm could take to process
		 * and send an ack for the sleep set request.
		 */
		udelay(50);
	else
		/* Fixed garbled log text ("Not receieve an ACK"). */
		pr_err("Did not receive ACK from RPM, ret = %d\n", ret);

	return ret;
}
739
/*
 * Returns
 * = 0 on successful reads
 * > 0 on successful reads with no further data
 * standard Linux error codes on failure.
 */
static int msm_rpm_read_sleep_ack(void)
{
	int ret;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};

	if (glink_enabled)
		ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
	else {
		ret = msm_rpm_read_smd_data(buf);
		/* On a clean read, report whether more packets wait. */
		if (!ret)
			ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
	}
	return ret;
}
760
761static int msm_rpm_flush_requests(bool print)
762{
763 struct rb_node *t;
764 int ret;
765 int count = 0;
766
767 for (t = rb_first(&tr_root); t; t = rb_next(t)) {
768
769 struct slp_buf *s = rb_entry(t, struct slp_buf, node);
770 unsigned int type = get_rsc_type(s->buf);
771 unsigned int id = get_rsc_id(s->buf);
772
773 if (!s->valid)
774 continue;
775
776 set_msg_id(s->buf, msm_rpm_get_next_msg_id());
777
778 if (!glink_enabled)
779 ret = msm_rpm_send_smd_buffer(s->buf,
780 get_buf_len(s->buf), true);
781 else
782 ret = msm_rpm_glink_send_buffer(s->buf,
783 get_buf_len(s->buf), true);
784
785 WARN_ON(ret != get_buf_len(s->buf));
786 trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id);
787
788 s->valid = false;
789 count++;
790
791 /*
792 * RPM acks need to be handled here if we have sent 24
793 * messages such that we do not overrun SMD buffer. Since
794 * we expect only sleep sets at this point (RPM PC would be
795 * disallowed if we had pending active requests), we need not
796 * process these sleep set acks.
797 */
798 if (count >= MAX_WAIT_ON_ACK) {
799 int ret = msm_rpm_read_sleep_ack();
800
801 if (ret >= 0)
802 count--;
803 else
804 return ret;
805 }
806 }
807 return 0;
808}
809
810static void msm_rpm_notify_sleep_chain(char *buf,
811 struct msm_rpm_kvp_data *kvp)
812{
813 struct msm_rpm_notifier_data notif;
814
815 notif.rsc_type = get_rsc_type(buf);
816 notif.rsc_id = get_req_len(buf);
817 notif.key = kvp->key;
818 notif.size = kvp->nbytes;
819 notif.value = kvp->value;
820 atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
821}
822
/*
 * Add or update one key/value pair on a request handle.
 *
 * @handle: request from msm_rpm_create_request{,_noirq}()
 * @key:    resource key
 * @data:   value bytes (copied; storage padded to a 4-byte multiple)
 * @size:   length of @data, must be >= 0
 * @noirq:  use atomic allocations
 *
 * Returns 0 on success (including the no-op case where the key
 * already holds identical bytes), -EINVAL on bad arguments, -ENOMEM
 * when the handle is full or allocation fails, or the pending probe
 * error before the driver is ready.
 */
static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size, bool noirq)
{
	uint32_t i;
	uint32_t data_size, msg_size;

	if (probe_status)
		return probe_status;

	if (!handle || !data) {
		pr_err("%s(): Invalid handle/data\n", __func__);
		return -EINVAL;
	}

	if (size < 0)
		return -EINVAL;

	data_size = ALIGN(size, SZ_4);
	/* Wire cost: aligned value plus the 8-byte kvp header. */
	msg_size = data_size + 8;

	/* Reuse the slot if this key was added before. */
	for (i = 0; i < handle->write_idx; i++) {
		if (handle->kvp[i].key != key)
			continue;
		if (handle->kvp[i].nbytes != data_size) {
			/* Size changed: drop the old value buffer. */
			kfree(handle->kvp[i].value);
			handle->kvp[i].value = NULL;
		} else {
			/* Identical bytes already staged: nothing to do. */
			if (!memcmp(handle->kvp[i].value, data, data_size))
				return 0;
		}
		break;
	}

	if (i >= handle->num_elements) {
		pr_err("Number of resources exceeds max allocated\n");
		return -ENOMEM;
	}

	if (i == handle->write_idx)
		handle->write_idx++;

	if (!handle->kvp[i].value) {
		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));

		if (!handle->kvp[i].value)
			return -ENOMEM;
	} else {
		/* We enter the else case, if a key already exists but the
		 * data doesn't match. In which case, we should zero the data
		 * out.
		 */
		memset(handle->kvp[i].value, 0, data_size);
	}

	/* Grow the header's payload length: full cost for a new key,
	 * only the size delta when replacing an existing one.
	 */
	if (!handle->kvp[i].valid)
		change_data_len(handle->client_buf, msg_size);
	else
		change_data_len(handle->client_buf,
			(data_size - handle->kvp[i].nbytes));

	handle->kvp[i].nbytes = data_size;
	handle->kvp[i].key = key;
	memcpy(handle->kvp[i].value, data, size);
	handle->kvp[i].valid = true;

	return 0;

}
891
/*
 * Allocate a request handle for (set, rsc_type, rsc_id) with room
 * for @num_elements key/value pairs. Returns the handle, NULL on
 * allocation failure, or ERR_PTR(probe_status) before the driver is
 * ready. Unwinds partial allocations with the goto-cleanup pattern;
 * msm_rpm_free_request() is the matching destructor.
 */
static struct msm_rpm_request *msm_rpm_create_request_common(
		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
		int num_elements, bool noirq)
{
	struct msm_rpm_request *cdata;
	uint32_t buf_size;

	if (probe_status)
		return ERR_PTR(probe_status);

	cdata = kzalloc(sizeof(struct msm_rpm_request),
			GFP_FLAG(noirq));

	if (!cdata) {
		pr_err("Cannot allocate memory for client data\n");
		goto cdata_alloc_fail;
	}

	/* Header size depends on the negotiated wire format. */
	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
		buf_size = sizeof(struct rpm_message_header_v0);
	else
		buf_size = sizeof(struct rpm_message_header_v1);

	cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));

	if (!cdata->client_buf)
		goto client_buf_alloc_fail;

	set_set_type(cdata->client_buf, set);
	set_rsc_type(cdata->client_buf, rsc_type);
	set_rsc_id(cdata->client_buf, rsc_id);

	cdata->num_elements = num_elements;
	cdata->write_idx = 0;

	cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
			GFP_FLAG(noirq));

	if (!cdata->kvp) {
		pr_warn("%s(): Cannot allocate memory for key value data\n",
				__func__);
		goto kvp_alloc_fail;
	}

	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));

	if (!cdata->buf)
		goto buf_alloc_fail;

	cdata->numbytes = DEFAULT_BUFFER_SIZE;
	return cdata;

buf_alloc_fail:
	kfree(cdata->kvp);
kvp_alloc_fail:
	kfree(cdata->client_buf);
client_buf_alloc_fail:
	kfree(cdata);
cdata_alloc_fail:
	return NULL;

}
954
955void msm_rpm_free_request(struct msm_rpm_request *handle)
956{
957 int i;
958
959 if (!handle)
960 return;
961 for (i = 0; i < handle->num_elements; i++)
962 kfree(handle->kvp[i].value);
963 kfree(handle->kvp);
964 kfree(handle->client_buf);
965 kfree(handle->buf);
966 kfree(handle);
967}
968EXPORT_SYMBOL(msm_rpm_free_request);
969
/* Allocate a request handle; may sleep. */
struct msm_rpm_request *msm_rpm_create_request(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, false);
}
EXPORT_SYMBOL(msm_rpm_create_request);

/* As msm_rpm_create_request(), but safe in atomic context. */
struct msm_rpm_request *msm_rpm_create_request_noirq(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, true);
}
EXPORT_SYMBOL(msm_rpm_create_request_noirq);

/* Add/update a key-value pair on @handle; may sleep. */
int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);

}
EXPORT_SYMBOL(msm_rpm_add_kvp_data);

/* As msm_rpm_add_kvp_data(), but safe in atomic context. */
int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
1002
/* Runs in interrupt context */
static void msm_rpm_notify(void *data, unsigned int event)
{
	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;

	WARN_ON(!pdata);

	if (!(pdata->ch_info))
		return;

	switch (event) {
	case SMD_EVENT_DATA:
		/* Defer the actual packet drain to the tasklet. */
		tasklet_schedule(&data_tasklet);
		trace_rpm_smd_interrupt_notify("interrupt notification");
		break;
	case SMD_EVENT_OPEN:
		/* Wake whoever is waiting on the channel open. */
		complete(&pdata->smd_open);
		break;
	case SMD_EVENT_CLOSE:
	case SMD_EVENT_STATUS:
	case SMD_EVENT_REOPEN_READY:
		break;
	default:
		pr_info("Unknown SMD event\n");

	}
}
1030
1031bool msm_rpm_waiting_for_ack(void)
1032{
1033 bool ret;
1034 unsigned long flags;
1035
1036 spin_lock_irqsave(&msm_rpm_list_lock, flags);
1037 ret = list_empty(&msm_rpm_wait_list);
1038 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
1039
1040 return !ret;
1041}
1042
1043static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
1044{
1045 struct list_head *ptr;
1046 struct msm_rpm_wait_data *elem = NULL;
1047 unsigned long flags;
1048
1049 spin_lock_irqsave(&msm_rpm_list_lock, flags);
1050
1051 list_for_each(ptr, &msm_rpm_wait_list) {
1052 elem = list_entry(ptr, struct msm_rpm_wait_data, list);
1053 if (elem && (elem->msg_id == msg_id))
1054 break;
1055 elem = NULL;
1056 }
1057 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
1058 return elem;
1059}
1060
/* Allocate a message id that is neither reserved nor still in flight. */
static uint32_t msm_rpm_get_next_msg_id(void)
{
	uint32_t id;

	/*
	 * A message id of 0 is used by the driver to indicate a error
	 * condition. The RPM driver uses a id of 1 to indicate unsent data
	 * when the data sent over hasn't been modified. This isn't a error
	 * scenario and wait for ack returns a success when the message id is 1.
	 */

	do {
		id = atomic_inc_return(&msm_rpm_msg_id);
	} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));

	return id;
}
1078
/*
 * Queue a waiter entry for @msg_id before the request is sent.
 * Fire-and-forget entries (@delete_on_ack) are appended at the tail
 * and freed by the ack handler; entries someone will wait on go to
 * the head and are freed via msm_rpm_free_list_entry(). Returns 0 or
 * -ENOMEM.
 */
static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
{
	unsigned long flags;
	struct msm_rpm_wait_data *data =
		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);

	if (!data)
		return -ENOMEM;

	init_completion(&data->ack);
	data->ack_recd = false;
	data->msg_id = msg_id;
	/* INIT_ERROR marks "no ack received yet". */
	data->errno = INIT_ERROR;
	data->delete_on_ack = delete_on_ack;
	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	if (delete_on_ack)
		list_add_tail(&data->list, &msm_rpm_wait_list);
	else
		list_add(&data->list, &msm_rpm_wait_list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);

	return 0;
}
1102
/* Unlink @elem from the wait list under the list lock, then free it. */
static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
{
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_del(&elem->list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	kfree(elem);
}
1112
1113static void msm_rpm_process_ack(uint32_t msg_id, int errno)
1114{
1115 struct list_head *ptr, *next;
1116 struct msm_rpm_wait_data *elem = NULL;
1117 unsigned long flags;
1118
1119 spin_lock_irqsave(&msm_rpm_list_lock, flags);
1120
1121 list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
1122 elem = list_entry(ptr, struct msm_rpm_wait_data, list);
1123 if (elem->msg_id == msg_id) {
1124 elem->errno = errno;
1125 elem->ack_recd = true;
1126 complete(&elem->ack);
1127 if (elem->delete_on_ack) {
1128 list_del(&elem->list);
1129 kfree(elem);
1130 }
1131 break;
1132 }
1133 }
1134 /* Special case where the sleep driver doesn't
1135 * wait for ACKs. This would decrease the latency involved with
1136 * entering RPM assisted power collapse.
1137 */
1138 if (!elem)
1139 trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF);
1140
1141 spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
1142}
1143
/* id/len/value triple; not referenced within this chunk. */
struct msm_rpm_kvp_packet {
	uint32_t id;
	uint32_t len;
	uint32_t val;
};
1149
/*
 * Read one complete packet from the SMD channel into @buf (which
 * must hold MAX_ERR_BUFFER_SIZE bytes). Returns -EAGAIN when no
 * complete packet is available yet, 0 otherwise.
 */
static int msm_rpm_read_smd_data(char *buf)
{
	int pkt_sz;
	int bytes_read = 0;

	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);

	if (!pkt_sz)
		return -EAGAIN;

	if (pkt_sz > MAX_ERR_BUFFER_SIZE) {
		pr_err("rpm_smd pkt_sz is greater than max size\n");
		goto error;
	}

	/* Wait until the whole packet has landed in the FIFO. */
	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
		return -EAGAIN;

	do {
		int len;

		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
		pkt_sz -= len;
		bytes_read += len;

	} while (pkt_sz > 0);

	if (pkt_sz < 0) {
		pr_err("rpm_smd pkt_sz is less than zero\n");
		goto error;
	}
	return 0;
error:
	WARN_ON(1);

	/*
	 * NOTE(review): both error paths WARN but still return 0, so
	 * callers cannot distinguish a corrupt/oversized packet from a
	 * clean read -- confirm this is intentional.
	 */
	return 0;
}
1187
/*
 * Tasklet body: drain every complete ack packet from the SMD channel
 * and complete the matching waiters. Reads are serialized by
 * smd_lock_read.
 */
static void data_fn_tasklet(unsigned long data)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};

	spin_lock(&msm_rpm_data.smd_lock_read);
	while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
		if (msm_rpm_read_smd_data(buf))
			break;
		msg_id = msm_rpm_get_msg_id_from_ack(buf);
		errno = msm_rpm_get_error_from_ack(buf);
		trace_rpm_smd_ack_recvd(0, msg_id, errno);
		msm_rpm_process_ack(msg_id, errno);
	}
	spin_unlock(&msm_rpm_data.smd_lock_read);
}
1205
1206static void msm_rpm_log_request(struct msm_rpm_request *cdata)
1207{
1208 char buf[DEBUG_PRINT_BUFFER_SIZE];
1209 size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
1210 char name[5];
1211 u32 value;
1212 uint32_t i;
1213 int j, prev_valid;
1214 int valid_count = 0;
1215 int pos = 0;
1216 uint32_t res_type, rsc_id;
1217
1218 name[4] = 0;
1219
1220 for (i = 0; i < cdata->write_idx; i++)
1221 if (cdata->kvp[i].valid)
1222 valid_count++;
1223
1224 pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
1225 if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
1226 pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
1227 get_msg_id(cdata->client_buf));
1228 pos += scnprintf(buf + pos, buflen - pos, "s=%s",
1229 (get_set_type(cdata->client_buf) ==
1230 MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
1231
1232 res_type = get_rsc_type(cdata->client_buf);
1233 rsc_id = get_rsc_id(cdata->client_buf);
1234 if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
1235 && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
1236 /* Both pretty and raw formatting */
1237 memcpy(name, &res_type, sizeof(uint32_t));
1238 pos += scnprintf(buf + pos, buflen - pos,
1239 ", rsc_type=0x%08X (%s), rsc_id=%u; ",
1240 res_type, name, rsc_id);
1241
1242 for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
1243 if (!cdata->kvp[i].valid)
1244 continue;
1245
1246 memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
1247 pos += scnprintf(buf + pos, buflen - pos,
1248 "[key=0x%08X (%s), value=%s",
1249 cdata->kvp[i].key, name,
1250 (cdata->kvp[i].nbytes ? "0x" : "null"));
1251
1252 for (j = 0; j < cdata->kvp[i].nbytes; j++)
1253 pos += scnprintf(buf + pos, buflen - pos,
1254 "%02X ",
1255 cdata->kvp[i].value[j]);
1256
1257 if (cdata->kvp[i].nbytes)
1258 pos += scnprintf(buf + pos, buflen - pos, "(");
1259
1260 for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
1261 value = 0;
1262 memcpy(&value, &cdata->kvp[i].value[j],
1263 min_t(uint32_t, sizeof(uint32_t),
1264 cdata->kvp[i].nbytes - j));
1265 pos += scnprintf(buf + pos, buflen - pos, "%u",
1266 value);
1267 if (j + 4 < cdata->kvp[i].nbytes)
1268 pos += scnprintf(buf + pos,
1269 buflen - pos, " ");
1270 }
1271 if (cdata->kvp[i].nbytes)
1272 pos += scnprintf(buf + pos, buflen - pos, ")");
1273 pos += scnprintf(buf + pos, buflen - pos, "]");
1274 if (prev_valid + 1 < valid_count)
1275 pos += scnprintf(buf + pos, buflen - pos, ", ");
1276 prev_valid++;
1277 }
1278 } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
1279 /* Pretty formatting only */
1280 memcpy(name, &res_type, sizeof(uint32_t));
1281 pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
1282 rsc_id);
1283
1284 for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
1285 if (!cdata->kvp[i].valid)
1286 continue;
1287
1288 memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
1289 pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
1290 name, (cdata->kvp[i].nbytes ? "" : "null"));
1291
1292 for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
1293 value = 0;
1294 memcpy(&value, &cdata->kvp[i].value[j],
1295 min_t(uint32_t, sizeof(uint32_t),
1296 cdata->kvp[i].nbytes - j));
1297 pos += scnprintf(buf + pos, buflen - pos, "%u",
1298 value);
1299
1300 if (j + 4 < cdata->kvp[i].nbytes)
1301 pos += scnprintf(buf + pos,
1302 buflen - pos, " ");
1303 }
1304 if (prev_valid + 1 < valid_count)
1305 pos += scnprintf(buf + pos, buflen - pos, ", ");
1306 prev_valid++;
1307 }
1308 } else {
1309 /* Raw formatting only */
1310 pos += scnprintf(buf + pos, buflen - pos,
1311 ", rsc_type=0x%08X, rsc_id=%u; ", res_type, rsc_id);
1312
1313 for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
1314 if (!cdata->kvp[i].valid)
1315 continue;
1316
1317 pos += scnprintf(buf + pos, buflen - pos,
1318 "[key=0x%08X, value=%s",
1319 cdata->kvp[i].key,
1320 (cdata->kvp[i].nbytes ? "0x" : "null"));
1321 for (j = 0; j < cdata->kvp[i].nbytes; j++) {
1322 pos += scnprintf(buf + pos, buflen - pos,
1323 "%02X",
1324 cdata->kvp[i].value[j]);
1325 if (j + 1 < cdata->kvp[i].nbytes)
1326 pos += scnprintf(buf + pos,
1327 buflen - pos, " ");
1328 }
1329 pos += scnprintf(buf + pos, buflen - pos, "]");
1330 if (prev_valid + 1 < valid_count)
1331 pos += scnprintf(buf + pos, buflen - pos, ", ");
1332 prev_valid++;
1333 }
1334 }
1335
1336 pos += scnprintf(buf + pos, buflen - pos, "\n");
1337 printk(buf);
1338}
1339
1340static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
1341{
1342 unsigned long flags;
1343 int ret;
1344
1345 spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
1346 ret = smd_write_avail(msm_rpm_data.ch_info);
1347
1348 while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
1349 if (ret < 0)
1350 break;
1351 if (!noirq) {
1352 spin_unlock_irqrestore(
1353 &msm_rpm_data.smd_lock_write, flags);
1354 cpu_relax();
1355 spin_lock_irqsave(
1356 &msm_rpm_data.smd_lock_write, flags);
1357 } else
1358 udelay(5);
1359 }
1360
1361 if (ret < 0) {
1362 pr_err("SMD not initialized\n");
1363 spin_unlock_irqrestore(
1364 &msm_rpm_data.smd_lock_write, flags);
1365 return ret;
1366 }
1367
1368 ret = smd_write(msm_rpm_data.ch_info, buf, size);
1369 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
1370 return ret;
1371}
1372
1373static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq)
1374{
1375 int ret;
1376 unsigned long flags;
1377 int timeout = 50;
1378
1379 spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
1380 do {
1381 ret = glink_tx(glink_data->glink_handle, buf, buf,
1382 size, GLINK_TX_SINGLE_THREADED);
1383 if (ret == -EBUSY || ret == -ENOSPC) {
1384 if (!noirq) {
1385 spin_unlock_irqrestore(
1386 &msm_rpm_data.smd_lock_write, flags);
1387 cpu_relax();
1388 spin_lock_irqsave(
1389 &msm_rpm_data.smd_lock_write, flags);
1390 } else {
1391 udelay(5);
1392 }
1393 timeout--;
1394 } else {
1395 ret = 0;
1396 }
1397 } while (ret && timeout);
1398 spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
1399
1400 if (!timeout)
1401 return 0;
1402 else
1403 return size;
1404}
1405
1406static int msm_rpm_send_data(struct msm_rpm_request *cdata,
1407 int msg_type, bool noirq, bool noack)
1408{
1409 uint8_t *tmpbuff;
1410 int ret;
1411 uint32_t i;
1412 uint32_t msg_size;
1413 int msg_hdr_sz, req_hdr_sz;
1414 uint32_t data_len = get_data_len(cdata->client_buf);
1415 uint32_t set = get_set_type(cdata->client_buf);
1416 uint32_t msg_id;
1417
1418 if (probe_status)
1419 return probe_status;
1420
1421 if (!data_len)
1422 return 1;
1423
1424 msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) :
1425 sizeof(struct rpm_message_header_v0);
1426
1427 req_hdr_sz = RPM_HDR_SIZE;
1428 set_msg_type(cdata->client_buf, msg_type);
1429
1430 set_req_len(cdata->client_buf, data_len + msg_hdr_sz - req_hdr_sz);
1431 msg_size = get_req_len(cdata->client_buf) + req_hdr_sz;
1432
1433 /* populate data_len */
1434 if (msg_size > cdata->numbytes) {
1435 kfree(cdata->buf);
1436 cdata->numbytes = msg_size;
1437 cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
1438 }
1439
1440 if (!cdata->buf) {
1441 pr_err("Failed malloc\n");
1442 return 0;
1443 }
1444
1445 tmpbuff = cdata->buf;
1446
1447 tmpbuff += msg_hdr_sz;
1448 for (i = 0; (i < cdata->write_idx); i++) {
1449 /* Sanity check */
1450 WARN_ON((tmpbuff - cdata->buf) > cdata->numbytes);
1451
1452 if (!cdata->kvp[i].valid)
1453 continue;
1454
1455 memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
1456 tmpbuff += sizeof(uint32_t);
1457
1458 memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
1459 tmpbuff += sizeof(uint32_t);
1460
1461 memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
1462 tmpbuff += cdata->kvp[i].nbytes;
1463
1464 if (set == MSM_RPM_CTX_SLEEP_SET)
1465 msm_rpm_notify_sleep_chain(cdata->client_buf,
1466 &cdata->kvp[i]);
1467
1468 }
1469
1470 memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz);
1471 if ((set == MSM_RPM_CTX_SLEEP_SET) &&
1472 !msm_rpm_smd_buffer_request(cdata, msg_size,
1473 GFP_FLAG(noirq)))
1474 return 1;
1475
1476 msg_id = msm_rpm_get_next_msg_id();
1477 /* Set the version bit for new protocol */
1478 set_msg_ver(cdata->buf, rpm_msg_fmt_ver);
1479 set_msg_id(cdata->buf, msg_id);
1480 set_msg_id(cdata->client_buf, msg_id);
1481
1482 if (msm_rpm_debug_mask
1483 & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
1484 msm_rpm_log_request(cdata);
1485
1486 if (standalone) {
1487 for (i = 0; (i < cdata->write_idx); i++)
1488 cdata->kvp[i].valid = false;
1489
1490 set_data_len(cdata->client_buf, 0);
1491 ret = msg_id;
1492 return ret;
1493 }
1494
1495 msm_rpm_add_wait_list(msg_id, noack);
1496
1497 ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq);
1498
1499 if (ret == msg_size) {
1500 for (i = 0; (i < cdata->write_idx); i++)
1501 cdata->kvp[i].valid = false;
1502 set_data_len(cdata->client_buf, 0);
1503 ret = msg_id;
1504 trace_rpm_smd_send_active_set(msg_id,
1505 get_rsc_type(cdata->client_buf),
1506 get_rsc_id(cdata->client_buf));
1507 } else if (ret < msg_size) {
1508 struct msm_rpm_wait_data *rc;
1509
1510 ret = 0;
1511 pr_err("Failed to write data msg_size:%d ret:%d msg_id:%d\n",
1512 msg_size, ret, msg_id);
1513 rc = msm_rpm_get_entry_from_msg_id(msg_id);
1514 if (rc)
1515 msm_rpm_free_list_entry(rc);
1516 }
1517 return ret;
1518}
1519
1520static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack)
1521{
1522 int ret;
1523 static DEFINE_MUTEX(send_mtx);
1524
1525 mutex_lock(&send_mtx);
1526 ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false, noack);
1527 mutex_unlock(&send_mtx);
1528
1529 return ret;
1530}
1531
/*
 * msm_rpm_send_request() - Send @handle to the RPM, wait-listed for an
 * ACK.  May sleep (serialized by a mutex in _msm_rpm_send_request()).
 * Returns the message id to pass to msm_rpm_wait_for_ack(), or the
 * 0/1/negative codes documented in msm_rpm_send_data().
 */
int msm_rpm_send_request(struct msm_rpm_request *handle)
{
	return _msm_rpm_send_request(handle, false);
}
EXPORT_SYMBOL(msm_rpm_send_request);
1537
/*
 * msm_rpm_send_request_noirq() - Variant of msm_rpm_send_request() for
 * contexts that cannot sleep: skips the send mutex and uses busy-wait
 * (udelay) writes to the transport.
 */
int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
{
	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true, false);
}
EXPORT_SYMBOL(msm_rpm_send_request_noirq);
1543
1544void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
1545{
1546 int ret;
1547
1548 ret = _msm_rpm_send_request(handle, true);
1549
1550 return ret < 0 ? ERR_PTR(ret) : NULL;
1551}
1552EXPORT_SYMBOL(msm_rpm_send_request_noack);
1553
/*
 * msm_rpm_wait_for_ack() - Sleep until the RPM acknowledges @msg_id.
 *
 * Returns the errno carried by the ACK, 0 when there is nothing to
 * wait for (msg_id 1 — the "empty request" return of
 * msm_rpm_send_data() — standalone mode, or an already-reaped entry),
 * or -ENOMEM for the invalid id 0.
 */
int msm_rpm_wait_for_ack(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	int rc = 0;

	if (!msg_id) {
		pr_err("Invalid msg id\n");
		return -ENOMEM;
	}

	/* msg_id 1 means the request carried no data; nothing to wait on */
	if (msg_id == 1)
		return rc;

	if (standalone)
		return rc;

	elem = msm_rpm_get_entry_from_msg_id(msg_id);
	if (!elem)
		/* Entry already ACKed and freed (or never wait-listed) */
		return rc;

	wait_for_completion(&elem->ack);
	trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADFEED);

	rc = elem->errno;
	msm_rpm_free_list_entry(elem);

	return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack);
1583
/*
 * Poll the SMD channel until the ACK for @msg_id has been consumed.
 * Called from msm_rpm_wait_for_ack_noirq() with smd_lock_read held and
 * interrupts disabled; ACKs for other ids read along the way are
 * processed normally.
 * NOTE(review): spins forever if the expected ACK never arrives —
 * there is no timeout here.
 */
static void msm_rpm_smd_read_data_noirq(uint32_t msg_id)
{
	uint32_t id = 0;

	while (id != msg_id) {
		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
			int errno;
			char buf[MAX_ERR_BUFFER_SIZE] = {};

			msm_rpm_read_smd_data(buf);
			id = msm_rpm_get_msg_id_from_ack(buf);
			errno = msm_rpm_get_error_from_ack(buf);
			trace_rpm_smd_ack_recvd(1, msg_id, errno);
			msm_rpm_process_ack(id, errno);
		}
	}
}
1601
/*
 * Poll GLINK for the ACK belonging to @elem; the rx notifier path
 * (msm_rpm_trans_notify_rx() -> msm_rpm_process_ack()) updates
 * elem->errno when the matching ACK arrives.
 * NOTE(review): the loop runs while elem->errno is non-zero, so an ACK
 * that carries a non-zero error code would keep this polling forever —
 * verify against INIT_ERROR semantics.
 */
static void msm_rpm_glink_read_data_noirq(struct msm_rpm_wait_data *elem)
{
	/* Use rx_poll method to read the message from RPM */
	int ret;

	while (elem->errno) {
		ret = glink_rpm_rx_poll(glink_data->glink_handle);
		if (ret >= 0) {
			/*
			 * We might have receieve the notification.
			 * Now we have to check whether the notification
			 * received is what we are interested?
			 * Wait for few usec to get the notification
			 * before re-trying the poll again.
			 */
			udelay(50);
		} else {
			pr_err("rx poll return error = %d\n", ret);
		}
	}
}
1623
/*
 * msm_rpm_wait_for_ack_noirq() - Poll (without sleeping) for the ACK
 * to @msg_id.
 *
 * Same contract as msm_rpm_wait_for_ack(), but usable from contexts
 * that cannot sleep: the ACK is consumed by polling the transport with
 * smd_lock_read held and interrupts disabled.
 */
int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	unsigned long flags;
	int rc = 0;

	if (!msg_id) {
		pr_err("Invalid msg id\n");
		return -ENOMEM;
	}

	/* msg_id 1: empty request, nothing was sent (msm_rpm_send_data) */
	if (msg_id == 1)
		return 0;

	if (standalone)
		return 0;

	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);

	elem = msm_rpm_get_entry_from_msg_id(msg_id);

	if (!elem)
		/* Should this be a bug
		 * Is it ok for another thread to read the msg?
		 */
		goto wait_ack_cleanup;

	if (elem->errno != INIT_ERROR) {
		/* ACK already arrived; no polling required */
		rc = elem->errno;
		msm_rpm_free_list_entry(elem);
		goto wait_ack_cleanup;
	}

	if (!glink_enabled)
		msm_rpm_smd_read_data_noirq(msg_id);
	else
		msm_rpm_glink_read_data_noirq(elem);

	rc = elem->errno;

	msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);

	/* Hand any remaining queued packets back to the tasklet */
	if (!glink_enabled)
		if (smd_is_pkt_avail(msm_rpm_data.ch_info))
			tasklet_schedule(&data_tasklet);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
1674
1675void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
1676 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
1677{
1678 int i, rc;
1679 struct msm_rpm_request *req =
1680 msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems,
1681 false);
1682
1683 if (IS_ERR(req))
1684 return req;
1685
1686 if (!req)
1687 return ERR_PTR(ENOMEM);
1688
1689 for (i = 0; i < nelems; i++) {
1690 rc = msm_rpm_add_kvp_data(req, kvp[i].key,
1691 kvp[i].data, kvp[i].length);
1692 if (rc)
1693 goto bail;
1694 }
1695
1696 rc = PTR_ERR(msm_rpm_send_request_noack(req));
1697bail:
1698 msm_rpm_free_request(req);
1699 return rc < 0 ? ERR_PTR(rc) : NULL;
1700}
1701EXPORT_SYMBOL(msm_rpm_send_message_noack);
1702
1703int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
1704 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
1705{
1706 int i, rc;
1707 struct msm_rpm_request *req =
1708 msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
1709
1710 if (IS_ERR(req))
1711 return PTR_ERR(req);
1712
1713 if (!req)
1714 return -ENOMEM;
1715
1716 for (i = 0; i < nelems; i++) {
1717 rc = msm_rpm_add_kvp_data(req, kvp[i].key,
1718 kvp[i].data, kvp[i].length);
1719 if (rc)
1720 goto bail;
1721 }
1722
1723 rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
1724bail:
1725 msm_rpm_free_request(req);
1726 return rc;
1727}
1728EXPORT_SYMBOL(msm_rpm_send_message);
1729
1730int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
1731 uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
1732{
1733 int i, rc;
1734 struct msm_rpm_request *req =
1735 msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
1736
1737 if (IS_ERR(req))
1738 return PTR_ERR(req);
1739
1740 if (!req)
1741 return -ENOMEM;
1742
1743 for (i = 0; i < nelems; i++) {
1744 rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
1745 kvp[i].data, kvp[i].length);
1746 if (rc)
1747 goto bail;
1748 }
1749
1750 rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
1751bail:
1752 msm_rpm_free_request(req);
1753 return rc;
1754}
1755EXPORT_SYMBOL(msm_rpm_send_message_noirq);
1756
1757/**
1758 * During power collapse, the rpm driver disables the SMD interrupts to make
1759 * sure that the interrupt doesn't wakes us from sleep.
1760 */
1761int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
1762{
1763 int ret = 0;
1764
1765 if (standalone)
1766 return 0;
1767
1768 if (!glink_enabled)
1769 ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info,
1770 true, cpumask);
1771 else
1772 ret = glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
1773 true, (void *)cpumask);
1774
1775 if (!ret) {
1776 ret = msm_rpm_flush_requests(print);
1777
1778 if (ret) {
1779 if (!glink_enabled)
1780 smd_mask_receive_interrupt(
1781 msm_rpm_data.ch_info, false, NULL);
1782 else
1783 glink_rpm_mask_rx_interrupt(
1784 glink_data->glink_handle, false, NULL);
1785 }
1786 }
1787 return ret;
1788}
1789EXPORT_SYMBOL(msm_rpm_enter_sleep);
1790
1791/**
1792 * When the system resumes from power collapse, the SMD interrupt disabled by
1793 * enter function has to reenabled to continue processing SMD message.
1794 */
1795void msm_rpm_exit_sleep(void)
1796{
1797 int ret;
1798
1799 if (standalone)
1800 return;
1801
1802 do {
1803 ret = msm_rpm_read_sleep_ack();
1804 } while (ret > 0);
1805
1806 if (!glink_enabled)
1807 smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
1808 else
1809 glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
1810 false, NULL);
1811}
1812EXPORT_SYMBOL(msm_rpm_exit_sleep);
1813
1814/*
1815 * Whenever there is a data from RPM, notify_rx will be called.
1816 * This function is invoked either interrupt OR polling context.
1817 */
1818static void msm_rpm_trans_notify_rx(void *handle, const void *priv,
1819 const void *pkt_priv, const void *ptr, size_t size)
1820{
1821 uint32_t msg_id;
1822 int errno;
1823 char buf[MAX_ERR_BUFFER_SIZE] = {0};
1824 struct msm_rpm_wait_data *elem;
1825 static DEFINE_SPINLOCK(rx_notify_lock);
1826 unsigned long flags;
1827
1828 if (!size)
1829 return;
1830
1831 WARN_ON(size > MAX_ERR_BUFFER_SIZE);
1832
1833 spin_lock_irqsave(&rx_notify_lock, flags);
1834 memcpy(buf, ptr, size);
1835 msg_id = msm_rpm_get_msg_id_from_ack(buf);
1836 errno = msm_rpm_get_error_from_ack(buf);
1837 elem = msm_rpm_get_entry_from_msg_id(msg_id);
1838
1839 /*
1840 * It is applicable for sleep set requests
1841 * Sleep set requests are not added to the
1842 * wait queue list. Without this check we
1843 * run into NULL pointer deferrence issue.
1844 */
1845 if (!elem) {
1846 spin_unlock_irqrestore(&rx_notify_lock, flags);
1847 glink_rx_done(handle, ptr, 0);
1848 return;
1849 }
1850
1851 msm_rpm_process_ack(msg_id, errno);
1852 spin_unlock_irqrestore(&rx_notify_lock, flags);
1853
1854 glink_rx_done(handle, ptr, 0);
1855}
1856
1857static void msm_rpm_trans_notify_state(void *handle, const void *priv,
1858 unsigned int event)
1859{
1860 switch (event) {
1861 case GLINK_CONNECTED:
1862 glink_data->glink_handle = handle;
1863
1864 if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
1865 pr_err("glink_handle %d\n",
1866 (int)PTR_ERR(glink_data->glink_handle));
1867 WARN_ON(1);
1868 }
1869
1870 /*
1871 * Do not allow clients to send data to RPM until glink
1872 * is fully open.
1873 */
1874 probe_status = 0;
1875 pr_info("glink config params: transport=%s, edge=%s, name=%s\n",
1876 glink_data->xprt,
1877 glink_data->edge,
1878 glink_data->name);
1879 break;
1880 default:
1881 pr_err("Unrecognized event %d\n", event);
1882 break;
1883 };
1884}
1885
/*
 * GLINK tx-done callback.  Intentionally empty: nothing needs to
 * happen here when the transport is finished with the tx buffer.
 */
static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv,
					const void *pkt_priv, const void *ptr)
{
}
1890
/*
 * Worker: open the RPM GLINK channel using the open_cfg prepared by
 * msm_rpm_glink_notifier_cb().  WARNs (but does not retry) if the
 * open fails.
 */
static void msm_rpm_glink_open_work(struct work_struct *work)
{
	pr_debug("Opening glink channel\n");
	glink_data->glink_handle = glink_open(glink_data->open_cfg);

	if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
		pr_err("Error: glink_open failed %d\n",
				(int)PTR_ERR(glink_data->glink_handle));
		WARN_ON(1);
	}
}
1902
1903static void msm_rpm_glink_notifier_cb(struct glink_link_state_cb_info *cb_info,
1904 void *priv)
1905{
1906 struct glink_open_config *open_config;
1907 static bool first = true;
1908
1909 if (!cb_info) {
1910 pr_err("Missing callback data\n");
1911 return;
1912 }
1913
1914 switch (cb_info->link_state) {
1915 case GLINK_LINK_STATE_UP:
1916 if (first)
1917 first = false;
1918 else
1919 break;
1920 open_config = kzalloc(sizeof(*open_config), GFP_KERNEL);
1921 if (!open_config) {
1922 pr_err("Could not allocate memory\n");
1923 break;
1924 }
1925
1926 glink_data->open_cfg = open_config;
1927 pr_debug("glink link state up cb receieved\n");
1928 INIT_WORK(&glink_data->work, msm_rpm_glink_open_work);
1929
1930 open_config->priv = glink_data;
1931 open_config->name = glink_data->name;
1932 open_config->edge = glink_data->edge;
1933 open_config->notify_rx = msm_rpm_trans_notify_rx;
1934 open_config->notify_tx_done = msm_rpm_trans_notify_tx_done;
1935 open_config->notify_state = msm_rpm_trans_notify_state;
1936 schedule_work(&glink_data->work);
1937 break;
1938 default:
1939 pr_err("Unrecognised state = %d\n", cb_info->link_state);
1940 break;
1941 };
1942}
1943
1944static int msm_rpm_glink_dt_parse(struct platform_device *pdev,
1945 struct glink_apps_rpm_data *glink_data)
1946{
1947 char *key = NULL;
1948 int ret;
1949
1950 if (of_device_is_compatible(pdev->dev.of_node, "qcom,rpm-glink")) {
1951 glink_enabled = true;
1952 } else {
1953 pr_warn("qcom,rpm-glink compatible not matches\n");
1954 ret = -EINVAL;
1955 return ret;
1956 }
1957
1958 key = "qcom,glink-edge";
1959 ret = of_property_read_string(pdev->dev.of_node, key,
1960 &glink_data->edge);
1961 if (ret) {
1962 pr_err("Failed to read node: %s, key=%s\n",
1963 pdev->dev.of_node->full_name, key);
1964 return ret;
1965 }
1966
1967 key = "rpm-channel-name";
1968 ret = of_property_read_string(pdev->dev.of_node, key,
1969 &glink_data->name);
1970 if (ret)
1971 pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
1972 pdev->dev.of_node->full_name, key);
1973
1974 return ret;
1975}
1976
1977static int msm_rpm_glink_link_setup(struct glink_apps_rpm_data *glink_data,
1978 struct platform_device *pdev)
1979{
1980 struct glink_link_info *link_info;
1981 void *link_state_cb_handle;
1982 struct device *dev = &pdev->dev;
1983 int ret = 0;
1984
1985 link_info = devm_kzalloc(dev, sizeof(struct glink_link_info),
1986 GFP_KERNEL);
1987 if (!link_info) {
1988 ret = -ENOMEM;
1989 return ret;
1990 }
1991
1992 glink_data->link_info = link_info;
1993
1994 /*
1995 * Setup link info parameters
1996 */
1997 link_info->edge = glink_data->edge;
1998 link_info->glink_link_state_notif_cb =
1999 msm_rpm_glink_notifier_cb;
2000 link_state_cb_handle = glink_register_link_state_cb(link_info, NULL);
2001 if (IS_ERR_OR_NULL(link_state_cb_handle)) {
2002 pr_err("Could not register cb\n");
2003 ret = PTR_ERR(link_state_cb_handle);
2004 return ret;
2005 }
2006
2007 spin_lock_init(&msm_rpm_data.smd_lock_read);
2008 spin_lock_init(&msm_rpm_data.smd_lock_write);
2009
2010 return ret;
2011}
2012
/*
 * Probe-time GLINK setup: allocate the per-driver glink_data, parse
 * the GLINK device-tree properties, and register the link-state
 * callback.  Returns 0 on success or a negative errno (in which case
 * the caller falls back to the SMD transport path).
 */
static int msm_rpm_dev_glink_probe(struct platform_device *pdev)
{
	int ret = -ENOMEM;
	struct device *dev = &pdev->dev;

	glink_data = devm_kzalloc(dev, sizeof(*glink_data), GFP_KERNEL);
	if (!glink_data)
		return ret;

	ret = msm_rpm_glink_dt_parse(pdev, glink_data);
	if (ret < 0) {
		devm_kfree(dev, glink_data);
		return ret;
	}

	ret = msm_rpm_glink_link_setup(glink_data, pdev);
	if (ret < 0) {
		/*
		 * If the glink setup fails there is no
		 * fall back mechanism to SMD.
		 */
		pr_err("GLINK setup fail ret = %d\n", ret);
		WARN_ON(1);
	}

	return ret;
}
2040
2041static int msm_rpm_dev_probe(struct platform_device *pdev)
2042{
2043 char *key = NULL;
2044 int ret = 0;
2045 void __iomem *reg_base;
2046 uint32_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */
2047
2048 /*
2049 * Check for standalone support
2050 */
2051 key = "rpm-standalone";
2052 standalone = of_property_read_bool(pdev->dev.of_node, key);
2053 if (standalone) {
2054 probe_status = ret;
2055 goto skip_init;
2056 }
2057
2058 reg_base = of_iomap(pdev->dev.of_node, 0);
2059
2060 if (reg_base) {
2061 version = readq_relaxed(reg_base);
2062 iounmap(reg_base);
2063 }
2064
2065 if (version == V1_PROTOCOL_VERSION)
2066 rpm_msg_fmt_ver = RPM_MSG_V1_FMT;
2067
2068 pr_debug("RPM-SMD running version %d/n", rpm_msg_fmt_ver);
2069
2070 ret = msm_rpm_dev_glink_probe(pdev);
2071 if (!ret) {
2072 pr_info("APSS-RPM communication over GLINK\n");
2073 msm_rpm_send_buffer = msm_rpm_glink_send_buffer;
2074 of_platform_populate(pdev->dev.of_node, NULL, NULL,
2075 &pdev->dev);
2076 return ret;
2077 }
2078 msm_rpm_send_buffer = msm_rpm_send_smd_buffer;
2079
2080 key = "rpm-channel-name";
2081 ret = of_property_read_string(pdev->dev.of_node, key,
2082 &msm_rpm_data.ch_name);
2083 if (ret) {
2084 pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
2085 pdev->dev.of_node->full_name, key);
2086 goto fail;
2087 }
2088
2089 key = "rpm-channel-type";
2090 ret = of_property_read_u32(pdev->dev.of_node, key,
2091 &msm_rpm_data.ch_type);
2092 if (ret) {
2093 pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
2094 pdev->dev.of_node->full_name, key);
2095 goto fail;
2096 }
2097
2098 ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
2099 msm_rpm_data.ch_type,
2100 &msm_rpm_data.ch_info,
2101 &msm_rpm_data,
2102 msm_rpm_notify);
2103 if (ret) {
2104 if (ret != -EPROBE_DEFER) {
2105 pr_err("%s: Cannot open RPM channel %s %d\n",
2106 __func__, msm_rpm_data.ch_name,
2107 msm_rpm_data.ch_type);
2108 }
2109 goto fail;
2110 }
2111
2112 spin_lock_init(&msm_rpm_data.smd_lock_write);
2113 spin_lock_init(&msm_rpm_data.smd_lock_read);
2114 tasklet_init(&data_tasklet, data_fn_tasklet, 0);
2115
2116 wait_for_completion(&msm_rpm_data.smd_open);
2117
2118 smd_disable_read_intr(msm_rpm_data.ch_info);
2119
Raju P.L.S.S.S.N43c1be72017-10-31 16:50:30 +05302120 probe_status = ret;
2121skip_init:
2122 of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
2123
2124 if (standalone)
2125 pr_info("RPM running in standalone mode\n");
2126fail:
2127 return probe_status;
2128}
2129
/* DT compatibles handled here: legacy SMD and GLINK based RPM nodes. */
static const struct of_device_id msm_rpm_match_table[] = {
	{.compatible = "qcom,rpm-smd"},
	{.compatible = "qcom,rpm-glink"},
	{},
};
2135
/* Platform driver glue; bound through msm_rpm_match_table compatibles. */
static struct platform_driver msm_rpm_device_driver = {
	.probe = msm_rpm_dev_probe,
	.driver = {
		.name = "rpm-smd",
		.owner = THIS_MODULE,
		.of_match_table = msm_rpm_match_table,
	},
};
2144
2145int __init msm_rpm_driver_init(void)
2146{
2147 static bool registered;
2148
2149 if (registered)
2150 return 0;
2151 registered = true;
2152
2153 return platform_driver_register(&msm_rpm_device_driver);
2154}
2155EXPORT_SYMBOL(msm_rpm_driver_init);
2156arch_initcall(msm_rpm_driver_init);