blob: f9ac00f9127fefc33ce2c6ed25ca696a4a040cca [file] [log] [blame]
Duy Truong790f06d2013-02-13 16:38:12 -08001/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/semaphore.h>
27#include <linux/spinlock.h>
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -060028#include <linux/device.h>
29#include <linux/platform_device.h>
Praveen Chidambaram78499012011-11-01 17:15:17 -060030#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#include <asm/hardware/gic.h>
32#include <mach/msm_iomap.h>
33#include <mach/rpm.h>
34
35/******************************************************************************
36 * Data type and structure definitions
37 *****************************************************************************/
38
/*
 * Bookkeeping for one in-flight request to the RPM.  Filled in by the
 * requester before the request is sent, and consumed by
 * msm_rpm_process_ack_interrupt() when the RPM acknowledges.
 */
struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;	/* id/value pairs sent to the RPM;
					 * values are overwritten from the
					 * ack page on completion */
	int count;			/* number of entries in <req> */
	uint32_t *ctx_mask_ack;		/* out: acknowledged context mask */
	uint32_t *sel_masks_ack;	/* out: acknowledged select masks */
	struct completion *done;	/* completed on ack; NULL in
					 * polling (noirq) mode */
};
46
/*
 * Per-context notification configuration.  <iv> holds two runs of
 * msm_rpm_sel_mask_size id/value pairs: the "configured" masks first,
 * then the "registered" masks (see the accessor macros below).
 */
struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[SEL_MASK_SIZE * 2];
};

/* First msm_rpm_sel_mask_size entries of <iv> */
#define configured_iv(notif_cfg) ((notif_cfg)->iv)
/* Entries of <iv> starting at offset msm_rpm_sel_mask_size */
#define registered_iv(notif_cfg) ((notif_cfg)->iv + msm_rpm_sel_mask_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
/* Number of select-mask registers on this target (<= SEL_MASK_SIZE) */
static uint32_t msm_rpm_sel_mask_size;
/* Target-specific platform data: register bases, id maps, irq numbers */
static struct msm_rpm_platform_data msm_rpm_data;

static DEFINE_MUTEX(msm_rpm_mutex);	/* serializes task-context requests */
static DEFINE_SPINLOCK(msm_rpm_lock);	/* serializes request submission */
static DEFINE_SPINLOCK(msm_rpm_irq_lock); /* guards msm_rpm_request and the
					   * notification list */

/* The single outstanding request, if any; guarded by msm_rpm_irq_lock */
static struct msm_rpm_request *msm_rpm_request;
static struct msm_rpm_request msm_rpm_request_irq_mode;
static struct msm_rpm_request msm_rpm_request_poll_mode;

/* Registered notification listeners; guarded by msm_rpm_irq_lock */
static LIST_HEAD(msm_rpm_notifications);
static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
static bool msm_rpm_init_notif_done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070068/******************************************************************************
69 * Internal functions
70 *****************************************************************************/
71
Praveen Chidambaram78499012011-11-01 17:15:17 -060072static inline unsigned int target_enum(unsigned int id)
73{
74 BUG_ON(id >= MSM_RPM_ID_LAST);
75 return msm_rpm_data.target_id[id].id;
76}
77
78static inline unsigned int target_status(unsigned int id)
79{
80 BUG_ON(id >= MSM_RPM_STATUS_ID_LAST);
81 return msm_rpm_data.target_status[id];
82}
83
84static inline unsigned int target_ctrl(unsigned int id)
85{
86 BUG_ON(id >= MSM_RPM_CTRL_LAST);
87 return msm_rpm_data.target_ctrl_id[id];
88}
89
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070090static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
91{
Praveen Chidambaram78499012011-11-01 17:15:17 -060092 return __raw_readl(msm_rpm_data.reg_base_addrs[page] + reg * 4);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070093}
94
95static inline void msm_rpm_write(
96 unsigned int page, unsigned int reg, uint32_t value)
97{
Praveen Chidambaram78499012011-11-01 17:15:17 -060098 __raw_writel(value,
99 msm_rpm_data.reg_base_addrs[page] + reg * 4);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700100}
101
102static inline void msm_rpm_read_contiguous(
103 unsigned int page, unsigned int reg, uint32_t *values, int count)
104{
105 int i;
106
107 for (i = 0; i < count; i++)
108 values[i] = msm_rpm_read(page, reg + i);
109}
110
111static inline void msm_rpm_write_contiguous(
112 unsigned int page, unsigned int reg, uint32_t *values, int count)
113{
114 int i;
115
116 for (i = 0; i < count; i++)
117 msm_rpm_write(page, reg + i, values[i]);
118}
119
/* Zero <count> consecutive registers starting at <reg>. */
static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	int idx = 0;

	while (idx < count) {
		msm_rpm_write(page, reg + idx, 0);
		idx++;
	}
}
128
129static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
130{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600131 return (id >= MSM_RPM_ID_LAST) ? msm_rpm_data.sel_last + 1 :
132 msm_rpm_data.target_id[id].sel;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133}
134
/*
 * OR the select-mask bits for every id in <req> into <sel_masks>.
 *
 * Note: the function does not clear the masks before filling them.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpm_fill_sel_masks(
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	uint32_t sel;
	int i;

	for (i = 0; i < count; i++) {
		sel = msm_rpm_map_id_to_sel(req[i].id);

		/* sel_last + 1 marks an id with no mapping on this target */
		if (sel > msm_rpm_data.sel_last) {
			pr_err("%s(): RPM ID %d not defined for target\n",
					__func__, req[i].id);
			return -EINVAL;
		}

		sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
			msm_rpm_get_sel_mask(sel);
	}

	return 0;
}
163
/* Raise the IPC interrupt that tells the RPM to process the request page. */
static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_data.ipc_rpm_val,
			msm_rpm_data.ipc_rpm_reg);
}
169
/*
 * Read and dispatch whatever the RPM has acknowledged: either a
 * notification (wake up matching listeners) or the completion of the
 * outstanding request (copy ack values back to the requester).
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0));
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
		sel_masks_ack, msm_rpm_sel_mask_size);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		/* wake every listener whose select masks overlap the ack */
		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < msm_rpm_sel_mask_size; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		/* clear the ack registers so the RPM can post the next one */
		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		/* pull the RPM's final values back into the caller's array */
		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
				target_enum(msm_rpm_request->req[i].id));

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		/* done is NULL in polling mode; see set_exclusive_noirq() */
		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
240
/* Acknowledge an RPM fatal-error interrupt, then bring the system down. */
static void msm_rpm_err_fatal(void)
{
	/* Tell RPM that we're handling the interrupt */
	__raw_writel(0x1, msm_rpm_data.ipc_rpm_reg);
	panic("RPM error fataled");
}
247
/* IRQ handler for the RPM error line; never returns (panics). */
static irqreturn_t msm_rpm_err_interrupt(int irq, void *dev_id)
{
	msm_rpm_err_fatal();
	return IRQ_HANDLED;	/* unreachable: msm_rpm_err_fatal() panics */
}
253
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
255{
256 unsigned long flags;
257 int rc;
258
259 if (dev_id != &msm_rpm_ack_interrupt)
260 return IRQ_NONE;
261
262 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
263 rc = msm_rpm_process_ack_interrupt();
264 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
265
266 return IRQ_HANDLED;
267}
268
/*
 * Poll until the outstanding request (if any) has been acknowledged,
 * processing the ack inline instead of relying on the interrupt.
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * allow_async_completion: when true, the irq lock is dropped around
 * the delay so the ack interrupt handler may complete the request
 * concurrently.
 */
static void msm_rpm_busy_wait_for_request_completion(
	bool allow_async_completion)
{
	int rc;

	do {
		/* spin until the ack irq is pending or the request is gone */
		while (!gic_is_irq_pending(msm_rpm_data.irq_ack) &&
				msm_rpm_request) {
			if (allow_async_completion)
				spin_unlock(&msm_rpm_irq_lock);
			if (gic_is_irq_pending(msm_rpm_data.irq_err))
				msm_rpm_err_fatal();
			gic_clear_irq_pending(msm_rpm_data.irq_err);
			udelay(1);
			if (allow_async_completion)
				spin_lock(&msm_rpm_irq_lock);
		}

		/* completed asynchronously while the lock was dropped */
		if (!msm_rpm_request)
			break;

		rc = msm_rpm_process_ack_interrupt();
		gic_clear_irq_pending(msm_rpm_data.irq_ack);
	} while (rc);	/* loop while acks are notifications/spurious */
}
297
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Interrupt-driven variant: writes the request, fires the IPC doorbell,
 * then sleeps on a completion that the ack irq handler signals.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
	int i;

	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	/* lock order: msm_rpm_lock outside, msm_rpm_irq_lock inside */
	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* only one request may be outstanding at a time */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	/* the ack must echo our context mask, modulo the REJECTED bit */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
356
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Polling variant for irqs-disabled contexts: masks the ack and error
 * interrupts at the irq_chip, busy-waits for completion, then unmasks.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
	struct irq_chip *irq_chip, *err_chip;
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;	/* poll, don't complete */

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));
	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		/* undo the ack-irq mask before bailing out */
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	/* drain any request the irq handler had in flight */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	/* unmask in reverse order of masking */
	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* the ack must echo our context mask, modulo the REJECTED bit */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
434
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Shared implementation behind msm_rpm_set() and msm_rpm_set_noirq():
 * validates <ctx>, builds the select masks, then dispatches to the
 * interrupt-driven or polling path.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 *   -ENODEV: RPM driver not initialized
 */
static int msm_rpm_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[SEL_MASK_SIZE] = {};
	int rc;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto set_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto set_common_exit;

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
	} else {
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
		mutex_unlock(&msm_rpm_mutex);
	}

set_common_exit:
	return rc;
}
473
/*
 * Shared implementation behind msm_rpm_clear() and msm_rpm_clear_noirq():
 * converts the caller's ids into an "invalidate" request whose values
 * are the select masks to clear, then submits it under the invalidate
 * select.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENODEV: RPM driver not initialized.
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[SEL_MASK_SIZE];
	int rc;
	int i;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	/* the masks to invalidate become the request payload */
	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	/* reuse sel_masks to select only the invalidate operation */
	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(msm_rpm_data.sel_invalidate)] |=
		msm_rpm_get_sel_mask(msm_rpm_data.sel_invalidate);

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
				ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		BUG_ON(rc);	/* invalidate requests must not be rejected */
	} else {
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		BUG_ON(rc);	/* invalidate requests must not be rejected */
	}

clear_common_exit:
	return rc;
}
524
/*
 * Push <new_cfg> to the RPM if it differs from <curr_cfg>, then record
 * it as the current configuration.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_update_notification(uint32_t ctx,
	struct msm_rpm_notif_config *curr_cfg,
	struct msm_rpm_notif_config *new_cfg)
{
	unsigned int sel_notif = msm_rpm_data.sel_notification;

	if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
		uint32_t sel_masks[SEL_MASK_SIZE] = {};
		int rc;

		sel_masks[msm_rpm_get_sel_mask_reg(sel_notif)]
			|= msm_rpm_get_sel_mask(sel_notif);

		rc = msm_rpm_set_exclusive(ctx,
			sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
		BUG_ON(rc);	/* notification config must not be rejected */

		memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
	}
}
548
/*
 * Establish the baseline notification configuration for every context
 * set: all "configured" ids enabled (~0), no "registered" listeners.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_initialize_notification(void)
{
	struct msm_rpm_notif_config cfg;
	unsigned int ctx;
	int i;

	for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
		cfg = msm_rpm_notif_cfgs[ctx];

		for (i = 0; i < msm_rpm_sel_mask_size; i++) {
			configured_iv(&cfg)[i].id =
				MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
			configured_iv(&cfg)[i].value = ~0UL;

			registered_iv(&cfg)[i].id =
				MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
			registered_iv(&cfg)[i].value = 0;
		}

		msm_rpm_update_notification(ctx,
			&msm_rpm_notif_cfgs[ctx], &cfg);
	}
}
575
576/******************************************************************************
577 * Public functions
578 *****************************************************************************/
579
580int msm_rpm_local_request_is_outstanding(void)
581{
582 unsigned long flags;
583 int outstanding = 0;
584
585 if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
586 goto local_request_is_outstanding_exit;
587
588 if (!spin_trylock(&msm_rpm_irq_lock))
589 goto local_request_is_outstanding_unlock;
590
591 outstanding = (msm_rpm_request != NULL);
592 spin_unlock(&msm_rpm_irq_lock);
593
594local_request_is_outstanding_unlock:
595 spin_unlock_irqrestore(&msm_rpm_lock, flags);
596
597local_request_is_outstanding_exit:
598 return outstanding;
599}
600
/*
 * Read the specified status registers and return their values.
 *
 * status: array of id-value pairs. Each <id> specifies a status register,
 *	i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
 *	contain the value of the status register.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EBUSY: RPM is updating the status page; values across different registers
 *	may not be consistent
 *   -EINVAL: invalid id in <status> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
{
	uint32_t seq_begin;
	uint32_t seq_end;
	int rc;
	int i;

	/* sequence number brackets the reads to detect concurrent updates */
	seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			target_status(MSM_RPM_STATUS_ID_SEQUENCE));

	for (i = 0; i < count; i++) {
		int target_status_id;

		if (status[i].id >= MSM_RPM_STATUS_ID_LAST) {
			pr_err("%s(): Status ID beyond limits\n", __func__);
			rc = -EINVAL;
			goto get_status_exit;
		}

		target_status_id = target_status(status[i].id);
		if (target_status_id >= MSM_RPM_STATUS_ID_LAST) {
			pr_err("%s(): Status id %d not defined for target\n",
					__func__,
					target_status_id);
			rc = -EINVAL;
			goto get_status_exit;
		}

		status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
				target_status_id);
	}

	seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			target_status(MSM_RPM_STATUS_ID_SEQUENCE));

	/* odd sequence number means an update was in progress */
	rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;

get_status_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_get_status);
657
658/*
659 * Issue a resource request to RPM to set resource values.
660 *
661 * Note: the function may sleep and must be called in a task context.
662 *
663 * ctx: the request's context.
664 * There two contexts that a RPM driver client can use:
665 * MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values
666 * that are intended to take effect when the CPU is active,
667 * MSM_RPM_CTX_SET_0 should be used. For resource values that are
668 * intended to take effect when the CPU is not active,
669 * MSM_RPM_CTX_SET_SLEEP should be used.
670 * req: array of id-value pairs. Each <id> specifies a RPM resource,
671 * i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested
672 * resource value.
673 * count: number of id-value pairs in the array
674 *
675 * Return value:
676 * 0: success
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700677 * -EINVAL: invalid <ctx> or invalid id in <req> array
678 * -ENOSPC: request rejected
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700679 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700680 */
681int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
682{
683 return msm_rpm_set_common(ctx, req, count, false);
684}
685EXPORT_SYMBOL(msm_rpm_set);
686
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function is similar to msm_rpm_set() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_set()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpm_set or msm_rpm_set_nosleep instead.");
	return msm_rpm_set_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_set_noirq);
702
703/*
704 * Issue a resource request to RPM to clear resource values. Once the
705 * values are cleared, the resources revert back to their default values
706 * for this RPM master.
707 *
708 * Note: the function may sleep and must be called in a task context.
709 *
710 * ctx: the request's context.
711 * req: array of id-value pairs. Each <id> specifies a RPM resource,
712 * i.e, one of MSM_RPM_ID_xxxx. <value>'s are ignored.
713 * count: number of id-value pairs in the array
714 *
715 * Return value:
716 * 0: success
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700717 * -EINVAL: invalid <ctx> or invalid id in <req> array
718 */
719int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
720{
721 return msm_rpm_clear_common(ctx, req, count, false);
722}
723EXPORT_SYMBOL(msm_rpm_clear);
724
/*
 * Issue a resource request to RPM to clear resource values.
 *
 * Note: the function is similar to msm_rpm_clear() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_clear()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpm_clear or msm_rpm_clear_nosleep instead.");
	return msm_rpm_clear_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_clear_noirq);
740
/*
 * Register for RPM notification. When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered. Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notifcation object. Caller should initialize only the
 *	semaphore field. When a notification arrives later, the
 *	semaphore will be "up"ed.
 * req: array of id-value pairs. Each <id> specifies a status register,
 *	i.e, one of MSM_RPM_STATUS_ID_xxxx. <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid id in <req> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	mutex_lock(&msm_rpm_mutex);

	/* lazy one-time baseline configuration, under the mutex */
	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	/* list is also walked from the ack irq handler; take irq lock */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	for (i = 0; i < msm_rpm_sel_mask_size; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_register_notification);
802
/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notifcation object that was registered previously.
 *
 * Return value:
 *   0: success
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc = 0;
	int i;

	mutex_lock(&msm_rpm_mutex);
	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* rebuild the registered masks from scratch ... */
	for (i = 0; i < msm_rpm_sel_mask_size; i++)
		registered_iv(&cfg)[i].value = 0;

	/* ... from every listener that remains after removing <n> */
	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < msm_rpm_sel_mask_size; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

	return rc;
}
EXPORT_SYMBOL(msm_rpm_unregister_notification);
842
/* RPM firmware version; presumably populated elsewhere in this file
 * (the code that sets these is not visible in this chunk) — TODO confirm */
static uint32_t fw_major, fw_minor, fw_build;
844
845static ssize_t driver_version_show(struct kobject *kobj,
846 struct kobj_attribute *attr, char *buf)
847{
848 return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
Praveen Chidambaram78499012011-11-01 17:15:17 -0600849 msm_rpm_data.ver[0], msm_rpm_data.ver[1], msm_rpm_data.ver[2]);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600850}
851
852static ssize_t fw_version_show(struct kobject *kobj,
853 struct kobj_attribute *attr, char *buf)
854{
855 return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
856 fw_major, fw_minor, fw_build);
857}
858
static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version);
static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version);

/* read-only version attributes exposed under the platform device */
static struct attribute *driver_attributes[] = {
	&driver_version_attr.attr,
	&fw_version_attr.attr,
	NULL
};

static struct attribute_group driver_attr_group = {
	.attrs = driver_attributes,
};
871
872static int __devinit msm_rpm_probe(struct platform_device *pdev)
873{
874 return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group);
875}
876
877static int __devexit msm_rpm_remove(struct platform_device *pdev)
878{
879 sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group);
880 return 0;
881}
882
/* Binds to the "msm_rpm" platform device registered by board code. */
static struct platform_driver msm_rpm_platform_driver = {
	.probe = msm_rpm_probe,
	.remove = __devexit_p(msm_rpm_remove),
	.driver = {
		.name = "msm_rpm",
		.owner = THIS_MODULE,
	},
};
891
/*
 * Build the shared-to-target id and status maps in msm_rpm_data from
 * the board-supplied platform data.
 *
 * target_id[]: each populated source entry maps a shared MSM_RPM_ID_*
 * slot to a target-specific (id, sel) pair, optionally covering
 * 'count' consecutive slots.  Slots not covered by any source entry
 * are marked invalid (id = MSM_RPM_ID_LAST, sel beyond sel_last).
 *
 * target_status[]: entries flagged MSM_RPM_STATUS_ID_VALID keep their
 * target value with the flag bit cleared; all others are marked
 * invalid with MSM_RPM_STATUS_ID_LAST.
 */
static void __init msm_rpm_populate_map(struct msm_rpm_platform_data *data)
{
	int i, j;
	struct msm_rpm_map_data *src = NULL;
	struct msm_rpm_map_data *dst = NULL;

	for (i = 0; i < MSM_RPM_ID_LAST;) {
		src = &data->target_id[i];
		dst = &msm_rpm_data.target_id[i];

		/* Default: mark this slot unmapped until a source entry
		 * (below) proves otherwise. */
		dst->id = MSM_RPM_ID_LAST;
		dst->sel = msm_rpm_data.sel_last + 1;

		/*
		 * copy the target specific id of the current and also of
		 * all the #count id's that follow the current.
		 * [MSM_RPM_ID_PM8921_S1_0] = { MSM_RPM_8960_ID_PM8921_S1_0,
		 *				MSM_RPM_8960_SEL_PM8921_S1,
		 *				2},
		 * [MSM_RPM_ID_PM8921_S1_1] = { 0, 0, 0 },
		 * should translate to
		 * [MSM_RPM_ID_PM8921_S1_0] = { MSM_RPM_8960_ID_PM8921_S1_0,
		 *				MSM_RPM_8960_SEL_PM8921,
		 *				2 },
		 * [MSM_RPM_ID_PM8921_S1_1] = { MSM_RPM_8960_ID_PM8921_S1_0 + 1,
		 *				MSM_RPM_8960_SEL_PM8921,
		 *				0 },
		 */
		for (j = 0; j < src->count; j++) {
			dst = &msm_rpm_data.target_id[i + j];
			dst->id = src->id + j;
			dst->sel = src->sel;
		}

		/* Advance past every slot this entry covered (at least one,
		 * so empty entries still make progress). */
		i += (src->count) ? src->count : 1;
	}

	for (i = 0; i < MSM_RPM_STATUS_ID_LAST; i++) {
		if (data->target_status[i] & MSM_RPM_STATUS_ID_VALID)
			msm_rpm_data.target_status[i] &=
				~MSM_RPM_STATUS_ID_VALID;
		else
			msm_rpm_data.target_status[i] = MSM_RPM_STATUS_ID_LAST;
	}
}
937
Praveen Chidambarame396ce62012-03-30 11:15:57 -0600938static irqreturn_t msm_pm_rpm_wakeup_interrupt(int irq, void *dev_id)
939{
940 if (dev_id != &msm_pm_rpm_wakeup_interrupt)
941 return IRQ_NONE;
942
943 return IRQ_HANDLED;
944}
945
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946int __init msm_rpm_init(struct msm_rpm_platform_data *data)
947{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700948 int rc;
949
Praveen Chidambaram78499012011-11-01 17:15:17 -0600950 memcpy(&msm_rpm_data, data, sizeof(struct msm_rpm_platform_data));
951 msm_rpm_sel_mask_size = msm_rpm_data.sel_last / 32 + 1;
952 BUG_ON(SEL_MASK_SIZE < msm_rpm_sel_mask_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700953
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600954 fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600955 target_status(MSM_RPM_STATUS_ID_VERSION_MAJOR));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600956 fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600957 target_status(MSM_RPM_STATUS_ID_VERSION_MINOR));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600958 fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600959 target_status(MSM_RPM_STATUS_ID_VERSION_BUILD));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600960 pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
961 fw_major, fw_minor, fw_build);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962
Praveen Chidambaram78499012011-11-01 17:15:17 -0600963 if (fw_major != msm_rpm_data.ver[0]) {
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600964 pr_err("%s: RPM version %u.%u.%u incompatible with "
965 "this driver version %u.%u.%u\n", __func__,
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600966 fw_major, fw_minor, fw_build,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600967 msm_rpm_data.ver[0],
968 msm_rpm_data.ver[1],
969 msm_rpm_data.ver[2]);
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600970 return -EFAULT;
971 }
972
Praveen Chidambaram78499012011-11-01 17:15:17 -0600973 msm_rpm_write(MSM_RPM_PAGE_CTRL,
974 target_ctrl(MSM_RPM_CTRL_VERSION_MAJOR), msm_rpm_data.ver[0]);
975 msm_rpm_write(MSM_RPM_PAGE_CTRL,
976 target_ctrl(MSM_RPM_CTRL_VERSION_MINOR), msm_rpm_data.ver[1]);
977 msm_rpm_write(MSM_RPM_PAGE_CTRL,
978 target_ctrl(MSM_RPM_CTRL_VERSION_BUILD), msm_rpm_data.ver[2]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700979
Praveen Chidambarame396ce62012-03-30 11:15:57 -0600980 rc = request_irq(data->irq_ack, msm_rpm_ack_interrupt,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
982 "rpm_drv", msm_rpm_ack_interrupt);
983 if (rc) {
984 pr_err("%s: failed to request irq %d: %d\n",
Praveen Chidambarame396ce62012-03-30 11:15:57 -0600985 __func__, data->irq_ack, rc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986 return rc;
987 }
988
Praveen Chidambarame396ce62012-03-30 11:15:57 -0600989 rc = irq_set_irq_wake(data->irq_ack, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700990 if (rc) {
991 pr_err("%s: failed to set wakeup irq %u: %d\n",
Praveen Chidambarame396ce62012-03-30 11:15:57 -0600992 __func__, data->irq_ack, rc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700993 return rc;
994 }
995
Stephen Boydf61255e2012-02-24 14:31:09 -0800996 rc = request_irq(data->irq_err, msm_rpm_err_interrupt,
997 IRQF_TRIGGER_RISING, "rpm_err", NULL);
998 if (rc) {
999 pr_err("%s: failed to request error interrupt: %d\n",
1000 __func__, rc);
1001 return rc;
1002 }
1003
Praveen Chidambarame396ce62012-03-30 11:15:57 -06001004 rc = request_irq(data->irq_wakeup,
1005 msm_pm_rpm_wakeup_interrupt, IRQF_TRIGGER_RISING,
1006 "pm_drv", msm_pm_rpm_wakeup_interrupt);
1007 if (rc) {
1008 pr_err("%s: failed to request irq %u: %d\n",
1009 __func__, data->irq_wakeup, rc);
1010 return rc;
1011 }
1012
1013 rc = irq_set_irq_wake(data->irq_wakeup, 1);
1014 if (rc) {
1015 pr_err("%s: failed to set wakeup irq %u: %d\n",
1016 __func__, data->irq_wakeup, rc);
1017 return rc;
1018 }
1019
Praveen Chidambaram78499012011-11-01 17:15:17 -06001020 msm_rpm_populate_map(data);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -06001021
1022 return platform_driver_register(&msm_rpm_platform_driver);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001023}