blob: fb9d295d1c8287c538c592d1629d0e2683a8c708 [file] [log] [blame]
Praveen Chidambaram78499012011-11-01 17:15:17 -06001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/semaphore.h>
27#include <linux/spinlock.h>
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -060028#include <linux/device.h>
29#include <linux/platform_device.h>
Praveen Chidambaram78499012011-11-01 17:15:17 -060030#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#include <asm/hardware/gic.h>
32#include <mach/msm_iomap.h>
33#include <mach/rpm.h>
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -070034#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070035
36/******************************************************************************
37 * Data type and structure definitions
38 *****************************************************************************/
39
/* Bookkeeping for one in-flight RPM transaction.  Filled in by the
 * submitter and consumed by the ack path under <msm_rpm_irq_lock>. */
struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;	/* id/value pairs being requested */
	int count;			/* number of entries in <req> */
	uint32_t *ctx_mask_ack;		/* out: acked context mask */
	uint32_t *sel_masks_ack;	/* out: acked select masks */
	struct completion *done;	/* completed on ack; NULL in poll mode */
};
47
/* Per-context notification configuration.  The first
 * msm_rpm_sel_mask_size entries of <iv> are the "configured" pairs, the
 * next msm_rpm_sel_mask_size the "registered" pairs — see the
 * configured_iv()/registered_iv() accessors below. */
struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[SEL_MASK_SIZE * 2];
};
51
/* Accessors into msm_rpm_notif_config.iv: "configured" pairs occupy the
 * first half of the array, "registered" pairs start at the runtime
 * offset msm_rpm_sel_mask_size. */
#define configured_iv(notif_cfg) ((notif_cfg)->iv)
#define registered_iv(notif_cfg) ((notif_cfg)->iv + msm_rpm_sel_mask_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054
Praveen Chidambaram78499012011-11-01 17:15:17 -060055static uint32_t msm_rpm_sel_mask_size;
56static struct msm_rpm_platform_data msm_rpm_data;
57
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058
59static DEFINE_MUTEX(msm_rpm_mutex);
60static DEFINE_SPINLOCK(msm_rpm_lock);
61static DEFINE_SPINLOCK(msm_rpm_irq_lock);
62
63static struct msm_rpm_request *msm_rpm_request;
64static struct msm_rpm_request msm_rpm_request_irq_mode;
65static struct msm_rpm_request msm_rpm_request_poll_mode;
66
67static LIST_HEAD(msm_rpm_notifications);
68static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
69static bool msm_rpm_init_notif_done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070/******************************************************************************
71 * Internal functions
72 *****************************************************************************/
73
Praveen Chidambaram78499012011-11-01 17:15:17 -060074static inline unsigned int target_enum(unsigned int id)
75{
76 BUG_ON(id >= MSM_RPM_ID_LAST);
77 return msm_rpm_data.target_id[id].id;
78}
79
/* Map a generic status register id to the target-specific id. */
static inline unsigned int target_status(unsigned int id)
{
	BUG_ON(id >= MSM_RPM_STATUS_ID_LAST);
	return msm_rpm_data.target_status[id];
}
85
/* Map a generic control register id to the target-specific id. */
static inline unsigned int target_ctrl(unsigned int id)
{
	BUG_ON(id >= MSM_RPM_CTRL_LAST);
	return msm_rpm_data.target_ctrl_id[id];
}
91
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070092static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
93{
Praveen Chidambaram78499012011-11-01 17:15:17 -060094 return __raw_readl(msm_rpm_data.reg_base_addrs[page] + reg * 4);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095}
96
/* Write one 32-bit RPM register <reg> in shared-memory page <page>.
 * Uses __raw_writel: no barrier; callers insert mb() where ordering
 * against the request interrupt matters. */
static inline void msm_rpm_write(
	unsigned int page, unsigned int reg, uint32_t value)
{
	__raw_writel(value,
		msm_rpm_data.reg_base_addrs[page] + reg * 4);
}
103
104static inline void msm_rpm_read_contiguous(
105 unsigned int page, unsigned int reg, uint32_t *values, int count)
106{
107 int i;
108
109 for (i = 0; i < count; i++)
110 values[i] = msm_rpm_read(page, reg + i);
111}
112
113static inline void msm_rpm_write_contiguous(
114 unsigned int page, unsigned int reg, uint32_t *values, int count)
115{
116 int i;
117
118 for (i = 0; i < count; i++)
119 msm_rpm_write(page, reg + i, values[i]);
120}
121
/* Zero <count> consecutive registers starting at <reg>. */
static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	unsigned int last = reg + count;

	while (reg < last) {
		msm_rpm_write(page, reg, 0);
		reg++;
	}
}
130
131static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
132{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600133 return (id >= MSM_RPM_ID_LAST) ? msm_rpm_data.sel_last + 1 :
134 msm_rpm_data.target_id[id].sel;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135}
136
137/*
138 * Note: the function does not clear the masks before filling them.
139 *
140 * Return value:
141 * 0: success
142 * -EINVAL: invalid id in <req> array
143 */
144static int msm_rpm_fill_sel_masks(
145 uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
146{
147 uint32_t sel;
148 int i;
149
150 for (i = 0; i < count; i++) {
151 sel = msm_rpm_map_id_to_sel(req[i].id);
152
Praveen Chidambaram78499012011-11-01 17:15:17 -0600153 if (sel > msm_rpm_data.sel_last) {
154 pr_err("%s(): RPM ID %d not defined for target\n",
155 __func__, req[i].id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156 return -EINVAL;
Praveen Chidambaram78499012011-11-01 17:15:17 -0600157 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700158
159 sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
160 msm_rpm_get_sel_mask(sel);
161 }
162
163 return 0;
164}
165
/* Ring the IPC doorbell that tells the RPM a new request is staged. */
static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_data.ipc_rpm_val,
			msm_rpm_data.ipc_rpm_reg);
}
171
172/*
173 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
174 *
175 * Return value:
176 * 0: request acknowledgement
177 * 1: notification
178 * 2: spurious interrupt
179 */
180static int msm_rpm_process_ack_interrupt(void)
181{
182 uint32_t ctx_mask_ack;
Praveen Chidambaram78499012011-11-01 17:15:17 -0600183 uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700184
Praveen Chidambaram78499012011-11-01 17:15:17 -0600185 ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL,
186 target_ctrl(MSM_RPM_CTRL_ACK_CTX_0));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700187 msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600188 target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
189 sel_masks_ack, msm_rpm_sel_mask_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700190
191 if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
192 struct msm_rpm_notification *n;
193 int i;
194
195 list_for_each_entry(n, &msm_rpm_notifications, list)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600196 for (i = 0; i < msm_rpm_sel_mask_size; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700197 if (sel_masks_ack[i] & n->sel_masks[i]) {
198 up(&n->sem);
199 break;
200 }
201
202 msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600203 target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
204 msm_rpm_sel_mask_size);
205 msm_rpm_write(MSM_RPM_PAGE_CTRL,
206 target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207 /* Ensure the write is complete before return */
208 mb();
209
210 return 1;
211 }
212
213 if (msm_rpm_request) {
214 int i;
215
216 *(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
217 memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
218 sizeof(sel_masks_ack));
219
220 for (i = 0; i < msm_rpm_request->count; i++)
221 msm_rpm_request->req[i].value =
222 msm_rpm_read(MSM_RPM_PAGE_ACK,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600223 target_enum(msm_rpm_request->req[i].id));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224
225 msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600226 target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
227 msm_rpm_sel_mask_size);
228 msm_rpm_write(MSM_RPM_PAGE_CTRL,
229 target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230 /* Ensure the write is complete before return */
231 mb();
232
233 if (msm_rpm_request->done)
234 complete_all(msm_rpm_request->done);
235
236 msm_rpm_request = NULL;
237 return 0;
238 }
239
240 return 2;
241}
242
243static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
244{
245 unsigned long flags;
246 int rc;
247
248 if (dev_id != &msm_rpm_ack_interrupt)
249 return IRQ_NONE;
250
251 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
252 rc = msm_rpm_process_ack_interrupt();
253 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
254
255 return IRQ_HANDLED;
256}
257
258/*
259 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
260 */
261static void msm_rpm_busy_wait_for_request_completion(
262 bool allow_async_completion)
263{
264 int rc;
265
266 do {
Praveen Chidambaram78499012011-11-01 17:15:17 -0600267 while (!gic_is_spi_pending(msm_rpm_data.irq_ack) &&
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268 msm_rpm_request) {
269 if (allow_async_completion)
270 spin_unlock(&msm_rpm_irq_lock);
271 udelay(1);
272 if (allow_async_completion)
273 spin_lock(&msm_rpm_irq_lock);
274 }
275
276 if (!msm_rpm_request)
277 break;
278
279 rc = msm_rpm_process_ack_interrupt();
Praveen Chidambaram78499012011-11-01 17:15:17 -0600280 gic_clear_spi_pending(msm_rpm_data.irq_ack);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700281 } while (rc);
282}
283
/* Send <req> to the RPM in interrupt (sleeping) mode and wait for the
 * acknowledgement.  Upon return, the <req> array will contain values
 * from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	int i;

	/* Publish where the ack path should deposit its results. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* Only one request may be outstanding at a time. */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	/* Stage the requested values in the request page... */
	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	/* ...then the select and context masks that describe them. */
	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	/* The ack must echo our masks, modulo the REJECTED bit. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
342
/* Send <req> to the RPM and busy-wait for the acknowledgement with the
 * ack interrupt masked (safe with local irqs disabled).  Upon return,
 * the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip = NULL;
	int i;

	/* Publish where the polled ack processing should deposit results. */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	/* Mask the ack IRQ so the interrupt handler cannot race the
	 * polling loop below. */
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));

	/* Drain any request the interrupt path still has in flight. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	/* Stage the requested values in the request page... */
	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	/* ...then the select and context masks that describe them. */
	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* The ack must echo our masks, modulo the REJECTED bit. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
412
413/* Upon return, the <req> array will contain values from the ack page.
414 *
415 * Return value:
416 * 0: success
417 * -EINTR: interrupted
418 * -EINVAL: invalid <ctx> or invalid id in <req> array
419 * -ENOSPC: request rejected
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700420 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700421 */
422static int msm_rpm_set_common(
423 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
424{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600425 uint32_t sel_masks[SEL_MASK_SIZE] = {};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700426 int rc;
427
Praveen Chidambaram78499012011-11-01 17:15:17 -0600428 if (cpu_is_apq8064())
429 return 0;
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700430
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700431 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
432 rc = -EINVAL;
433 goto set_common_exit;
434 }
435
436 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
437 if (rc)
438 goto set_common_exit;
439
440 if (noirq) {
441 unsigned long flags;
442
443 spin_lock_irqsave(&msm_rpm_lock, flags);
444 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
445 spin_unlock_irqrestore(&msm_rpm_lock, flags);
446 } else {
447 rc = mutex_lock_interruptible(&msm_rpm_mutex);
448 if (rc)
449 goto set_common_exit;
450
451 rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
452 mutex_unlock(&msm_rpm_mutex);
453 }
454
455set_common_exit:
456 return rc;
457}
458
459/*
460 * Return value:
461 * 0: success
462 * -EINTR: interrupted
463 * -EINVAL: invalid <ctx> or invalid id in <req> array
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700464 * -ENODEV: RPM driver not initialized.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700465 */
466static int msm_rpm_clear_common(
467 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
468{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600469 uint32_t sel_masks[SEL_MASK_SIZE] = {};
470 struct msm_rpm_iv_pair r[SEL_MASK_SIZE];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 int rc;
472 int i;
473
Praveen Chidambaram78499012011-11-01 17:15:17 -0600474 if (cpu_is_apq8064())
475 return 0;
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700477 if (ctx >= MSM_RPM_CTX_SET_COUNT) {
478 rc = -EINVAL;
479 goto clear_common_exit;
480 }
481
482 rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
483 if (rc)
484 goto clear_common_exit;
485
486 for (i = 0; i < ARRAY_SIZE(r); i++) {
487 r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
488 r[i].value = sel_masks[i];
489 }
490
491 memset(sel_masks, 0, sizeof(sel_masks));
Praveen Chidambaram78499012011-11-01 17:15:17 -0600492 sel_masks[msm_rpm_get_sel_mask_reg(msm_rpm_data.sel_invalidate)] |=
493 msm_rpm_get_sel_mask(msm_rpm_data.sel_invalidate);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700494
495 if (noirq) {
496 unsigned long flags;
497
498 spin_lock_irqsave(&msm_rpm_lock, flags);
499 rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
500 ARRAY_SIZE(r));
501 spin_unlock_irqrestore(&msm_rpm_lock, flags);
502 BUG_ON(rc);
503 } else {
504 rc = mutex_lock_interruptible(&msm_rpm_mutex);
505 if (rc)
506 goto clear_common_exit;
507
508 rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
509 mutex_unlock(&msm_rpm_mutex);
510 BUG_ON(rc);
511 }
512
513clear_common_exit:
514 return rc;
515}
516
517/*
518 * Note: assumes caller has acquired <msm_rpm_mutex>.
519 */
520static void msm_rpm_update_notification(uint32_t ctx,
521 struct msm_rpm_notif_config *curr_cfg,
522 struct msm_rpm_notif_config *new_cfg)
523{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600524 unsigned int sel_notif = msm_rpm_data.sel_notification;
525
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700526 if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
Praveen Chidambaram78499012011-11-01 17:15:17 -0600527 uint32_t sel_masks[SEL_MASK_SIZE] = {};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700528 int rc;
529
Praveen Chidambaram78499012011-11-01 17:15:17 -0600530 sel_masks[msm_rpm_get_sel_mask_reg(sel_notif)]
531 |= msm_rpm_get_sel_mask(sel_notif);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700532
533 rc = msm_rpm_set_exclusive(ctx,
534 sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
535 BUG_ON(rc);
536
537 memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
538 }
539}
540
541/*
542 * Note: assumes caller has acquired <msm_rpm_mutex>.
543 */
544static void msm_rpm_initialize_notification(void)
545{
546 struct msm_rpm_notif_config cfg;
547 unsigned int ctx;
548 int i;
549
550 for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
551 cfg = msm_rpm_notif_cfgs[ctx];
552
Praveen Chidambaram78499012011-11-01 17:15:17 -0600553 for (i = 0; i < msm_rpm_sel_mask_size; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700554 configured_iv(&cfg)[i].id =
555 MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
556 configured_iv(&cfg)[i].value = ~0UL;
557
558 registered_iv(&cfg)[i].id =
559 MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
560 registered_iv(&cfg)[i].value = 0;
561 }
562
563 msm_rpm_update_notification(ctx,
564 &msm_rpm_notif_cfgs[ctx], &cfg);
565 }
566}
567
568/******************************************************************************
569 * Public functions
570 *****************************************************************************/
571
572int msm_rpm_local_request_is_outstanding(void)
573{
574 unsigned long flags;
575 int outstanding = 0;
576
577 if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
578 goto local_request_is_outstanding_exit;
579
580 if (!spin_trylock(&msm_rpm_irq_lock))
581 goto local_request_is_outstanding_unlock;
582
583 outstanding = (msm_rpm_request != NULL);
584 spin_unlock(&msm_rpm_irq_lock);
585
586local_request_is_outstanding_unlock:
587 spin_unlock_irqrestore(&msm_rpm_lock, flags);
588
589local_request_is_outstanding_exit:
590 return outstanding;
591}
592
593/*
594 * Read the specified status registers and return their values.
595 *
596 * status: array of id-value pairs. Each <id> specifies a status register,
597 * i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
598 * contain the value of the status register.
599 * count: number of id-value pairs in the array
600 *
601 * Return value:
602 * 0: success
603 * -EBUSY: RPM is updating the status page; values across different registers
604 * may not be consistent
605 * -EINVAL: invalid id in <status> array
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700606 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 */
608int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
609{
610 uint32_t seq_begin;
611 uint32_t seq_end;
612 int rc;
613 int i;
614
Praveen Chidambaram78499012011-11-01 17:15:17 -0600615 if (cpu_is_apq8064())
616 return 0;
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700617
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700618 seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600619 target_status(MSM_RPM_STATUS_ID_SEQUENCE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700620
621 for (i = 0; i < count; i++) {
Praveen Chidambaram78499012011-11-01 17:15:17 -0600622 int target_status_id;
623
624 if (status[i].id >= MSM_RPM_STATUS_ID_LAST) {
625 pr_err("%s(): Status ID beyond limits\n", __func__);
626 rc = -EINVAL;
627 goto get_status_exit;
628 }
629
630 target_status_id = target_status(status[i].id);
631 if (target_status_id >= MSM_RPM_STATUS_ID_LAST) {
632 pr_err("%s(): Status id %d not defined for target\n",
633 __func__,
634 target_status_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700635 rc = -EINVAL;
636 goto get_status_exit;
637 }
638
639 status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600640 target_status_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700641 }
642
643 seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600644 target_status(MSM_RPM_STATUS_ID_SEQUENCE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700645
646 rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;
647
648get_status_exit:
649 return rc;
650}
651EXPORT_SYMBOL(msm_rpm_get_status);
652
653/*
654 * Issue a resource request to RPM to set resource values.
655 *
656 * Note: the function may sleep and must be called in a task context.
657 *
658 * ctx: the request's context.
659 * There two contexts that a RPM driver client can use:
660 * MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values
661 * that are intended to take effect when the CPU is active,
662 * MSM_RPM_CTX_SET_0 should be used. For resource values that are
663 * intended to take effect when the CPU is not active,
664 * MSM_RPM_CTX_SET_SLEEP should be used.
665 * req: array of id-value pairs. Each <id> specifies a RPM resource,
666 * i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested
667 * resource value.
668 * count: number of id-value pairs in the array
669 *
670 * Return value:
671 * 0: success
672 * -EINTR: interrupted
673 * -EINVAL: invalid <ctx> or invalid id in <req> array
674 * -ENOSPC: request rejected
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700675 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700676 */
677int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
678{
679 return msm_rpm_set_common(ctx, req, count, false);
680}
681EXPORT_SYMBOL(msm_rpm_set);
682
683/*
684 * Issue a resource request to RPM to set resource values.
685 *
686 * Note: the function is similar to msm_rpm_set() except that it must be
687 * called with interrupts masked. If possible, use msm_rpm_set()
688 * instead, to maximize CPU throughput.
689 */
690int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
691{
692 WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
693 "safely when local irqs are disabled. Consider using "
694 "msm_rpm_set or msm_rpm_set_nosleep instead.");
695 return msm_rpm_set_common(ctx, req, count, true);
696}
697EXPORT_SYMBOL(msm_rpm_set_noirq);
698
699/*
700 * Issue a resource request to RPM to clear resource values. Once the
701 * values are cleared, the resources revert back to their default values
702 * for this RPM master.
703 *
704 * Note: the function may sleep and must be called in a task context.
705 *
706 * ctx: the request's context.
707 * req: array of id-value pairs. Each <id> specifies a RPM resource,
708 * i.e, one of MSM_RPM_ID_xxxx. <value>'s are ignored.
709 * count: number of id-value pairs in the array
710 *
711 * Return value:
712 * 0: success
713 * -EINTR: interrupted
714 * -EINVAL: invalid <ctx> or invalid id in <req> array
715 */
716int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
717{
718 return msm_rpm_clear_common(ctx, req, count, false);
719}
720EXPORT_SYMBOL(msm_rpm_clear);
721
722/*
723 * Issue a resource request to RPM to clear resource values.
724 *
725 * Note: the function is similar to msm_rpm_clear() except that it must be
726 * called with interrupts masked. If possible, use msm_rpm_clear()
727 * instead, to maximize CPU throughput.
728 */
729int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
730{
731 WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
732 "safely when local irqs are disabled. Consider using "
733 "msm_rpm_clear or msm_rpm_clear_nosleep instead.");
734 return msm_rpm_clear_common(ctx, req, count, true);
735}
736EXPORT_SYMBOL(msm_rpm_clear_noirq);
737
738/*
739 * Register for RPM notification. When the specified resources
740 * change their status on RPM, RPM sends out notifications and the
741 * driver will "up" the semaphore in struct msm_rpm_notification.
742 *
743 * Note: the function may sleep and must be called in a task context.
744 *
745 * Memory for <n> must not be freed until the notification is
746 * unregistered. Memory for <req> can be freed after this
747 * function returns.
748 *
749 * n: the notifcation object. Caller should initialize only the
750 * semaphore field. When a notification arrives later, the
751 * semaphore will be "up"ed.
752 * req: array of id-value pairs. Each <id> specifies a status register,
753 * i.e, one of MSM_RPM_STATUS_ID_xxxx. <value>'s are ignored.
754 * count: number of id-value pairs in the array
755 *
756 * Return value:
757 * 0: success
758 * -EINTR: interrupted
759 * -EINVAL: invalid id in <req> array
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700760 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700761 */
762int msm_rpm_register_notification(struct msm_rpm_notification *n,
763 struct msm_rpm_iv_pair *req, int count)
764{
765 unsigned long flags;
766 unsigned int ctx;
767 struct msm_rpm_notif_config cfg;
768 int rc;
769 int i;
770
Praveen Chidambaram78499012011-11-01 17:15:17 -0600771 if (cpu_is_apq8064())
772 return 0;
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700774 INIT_LIST_HEAD(&n->list);
775 rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
776 if (rc)
777 goto register_notification_exit;
778
779 rc = mutex_lock_interruptible(&msm_rpm_mutex);
780 if (rc)
781 goto register_notification_exit;
782
783 if (!msm_rpm_init_notif_done) {
784 msm_rpm_initialize_notification();
785 msm_rpm_init_notif_done = true;
786 }
787
788 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
789 list_add(&n->list, &msm_rpm_notifications);
790 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
791
792 ctx = MSM_RPM_CTX_SET_0;
793 cfg = msm_rpm_notif_cfgs[ctx];
794
Praveen Chidambaram78499012011-11-01 17:15:17 -0600795 for (i = 0; i < msm_rpm_sel_mask_size; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700796 registered_iv(&cfg)[i].value |= n->sel_masks[i];
797
798 msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
799 mutex_unlock(&msm_rpm_mutex);
800
801register_notification_exit:
802 return rc;
803}
804EXPORT_SYMBOL(msm_rpm_register_notification);
805
806/*
807 * Unregister a notification.
808 *
809 * Note: the function may sleep and must be called in a task context.
810 *
811 * n: the notifcation object that was registered previously.
812 *
813 * Return value:
814 * 0: success
815 * -EINTR: interrupted
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700816 * -ENODEV: RPM driver not initialized
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700817 */
818int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
819{
820 unsigned long flags;
821 unsigned int ctx;
822 struct msm_rpm_notif_config cfg;
823 int rc;
824 int i;
825
Praveen Chidambaram78499012011-11-01 17:15:17 -0600826 if (cpu_is_apq8064())
827 return 0;
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700829 rc = mutex_lock_interruptible(&msm_rpm_mutex);
830 if (rc)
831 goto unregister_notification_exit;
832
833 ctx = MSM_RPM_CTX_SET_0;
834 cfg = msm_rpm_notif_cfgs[ctx];
835
Praveen Chidambaram78499012011-11-01 17:15:17 -0600836 for (i = 0; i < msm_rpm_sel_mask_size; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700837 registered_iv(&cfg)[i].value = 0;
838
839 spin_lock_irqsave(&msm_rpm_irq_lock, flags);
840 list_del(&n->list);
841 list_for_each_entry(n, &msm_rpm_notifications, list)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600842 for (i = 0; i < msm_rpm_sel_mask_size; i++)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700843 registered_iv(&cfg)[i].value |= n->sel_masks[i];
844 spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
845
846 msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
847 mutex_unlock(&msm_rpm_mutex);
848
849unregister_notification_exit:
850 return rc;
851}
852EXPORT_SYMBOL(msm_rpm_unregister_notification);
853
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600854static uint32_t fw_major, fw_minor, fw_build;
855
/* sysfs: report the driver's expected RPM interface version. */
static ssize_t driver_version_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		msm_rpm_data.ver[0], msm_rpm_data.ver[1], msm_rpm_data.ver[2]);
}
862
/* sysfs: report the firmware version read from the RPM. */
static ssize_t fw_version_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		fw_major, fw_minor, fw_build);
}
869
/* Read-only sysfs attributes exposed by the platform device. */
static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version);
static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version);

static struct attribute *driver_attributes[] = {
	&driver_version_attr.attr,
	&fw_version_attr.attr,
	NULL			/* sysfs requires NULL termination */
};

static struct attribute_group driver_attr_group = {
	.attrs = driver_attributes,
};
882
/* Platform probe: publish the version attributes under the device. */
static int __devinit msm_rpm_probe(struct platform_device *pdev)
{
	return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group);
}
887
888static int __devexit msm_rpm_remove(struct platform_device *pdev)
889{
890 sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group);
891 return 0;
892}
893
/*
 * Platform driver whose only job is creating/removing the version sysfs
 * attributes; registered at the end of msm_rpm_init().
 */
static struct platform_driver msm_rpm_platform_driver = {
	.probe = msm_rpm_probe,
	.remove = __devexit_p(msm_rpm_remove),
	.driver = {
		.name = "msm_rpm",
		.owner = THIS_MODULE,
	},
};
902
Praveen Chidambaram78499012011-11-01 17:15:17 -0600903static void __init msm_rpm_populate_map(struct msm_rpm_platform_data *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700904{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600905 int i, j;
906 struct msm_rpm_map_data *src = NULL;
907 struct msm_rpm_map_data *dst = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700908
Praveen Chidambaram78499012011-11-01 17:15:17 -0600909 for (i = 0; i < MSM_RPM_ID_LAST;) {
910 src = &data->target_id[i];
911 dst = &msm_rpm_data.target_id[i];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700912
Praveen Chidambaram78499012011-11-01 17:15:17 -0600913 dst->id = MSM_RPM_ID_LAST;
914 dst->sel = msm_rpm_data.sel_last + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700915
Praveen Chidambaram78499012011-11-01 17:15:17 -0600916 /*
917 * copy the target specific id of the current and also of
918 * all the #count id's that follow the current.
919 * [MSM_RPM_ID_PM8921_S1_0] = { MSM_RPM_8960_ID_PM8921_S1_0,
920 * MSM_RPM_8960_SEL_PM8921_S1,
921 * 2},
922 * [MSM_RPM_ID_PM8921_S1_1] = { 0, 0, 0 },
923 * should translate to
924 * [MSM_RPM_ID_PM8921_S1_0] = { MSM_RPM_8960_ID_PM8921_S1_0,
925 * MSM_RPM_8960_SEL_PM8921,
926 * 2 },
927 * [MSM_RPM_ID_PM8921_S1_1] = { MSM_RPM_8960_ID_PM8921_S1_0 + 1,
928 * MSM_RPM_8960_SEL_PM8921,
929 * 0 },
930 */
931 for (j = 0; j < src->count; j++) {
932 dst = &msm_rpm_data.target_id[i + j];
933 dst->id = src->id + j;
934 dst->sel = src->sel;
935 }
936
937 i += (src->count) ? src->count : 1;
938 }
939
940 for (i = 0; i < MSM_RPM_STATUS_ID_LAST; i++) {
941 if (data->target_status[i] & MSM_RPM_STATUS_ID_VALID)
942 msm_rpm_data.target_status[i] &=
943 ~MSM_RPM_STATUS_ID_VALID;
944 else
945 msm_rpm_data.target_status[i] = MSM_RPM_STATUS_ID_LAST;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946 }
947}
948
949int __init msm_rpm_init(struct msm_rpm_platform_data *data)
950{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951 unsigned int irq;
952 int rc;
953
Mahesh Sivasubramanian0e82fb22011-12-12 12:21:03 -0700954 if (cpu_is_apq8064())
955 return 0;
956
Praveen Chidambaram78499012011-11-01 17:15:17 -0600957 memcpy(&msm_rpm_data, data, sizeof(struct msm_rpm_platform_data));
958 msm_rpm_sel_mask_size = msm_rpm_data.sel_last / 32 + 1;
959 BUG_ON(SEL_MASK_SIZE < msm_rpm_sel_mask_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600961 fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600962 target_status(MSM_RPM_STATUS_ID_VERSION_MAJOR));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600963 fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600964 target_status(MSM_RPM_STATUS_ID_VERSION_MINOR));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600965 fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600966 target_status(MSM_RPM_STATUS_ID_VERSION_BUILD));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600967 pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
968 fw_major, fw_minor, fw_build);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700969
Praveen Chidambaram78499012011-11-01 17:15:17 -0600970 if (fw_major != msm_rpm_data.ver[0]) {
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600971 pr_err("%s: RPM version %u.%u.%u incompatible with "
972 "this driver version %u.%u.%u\n", __func__,
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600973 fw_major, fw_minor, fw_build,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600974 msm_rpm_data.ver[0],
975 msm_rpm_data.ver[1],
976 msm_rpm_data.ver[2]);
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600977 return -EFAULT;
978 }
979
Praveen Chidambaram78499012011-11-01 17:15:17 -0600980 msm_rpm_write(MSM_RPM_PAGE_CTRL,
981 target_ctrl(MSM_RPM_CTRL_VERSION_MAJOR), msm_rpm_data.ver[0]);
982 msm_rpm_write(MSM_RPM_PAGE_CTRL,
983 target_ctrl(MSM_RPM_CTRL_VERSION_MINOR), msm_rpm_data.ver[1]);
984 msm_rpm_write(MSM_RPM_PAGE_CTRL,
985 target_ctrl(MSM_RPM_CTRL_VERSION_BUILD), msm_rpm_data.ver[2]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700986
Praveen Chidambaram78499012011-11-01 17:15:17 -0600987 irq = data->irq_ack;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988
989 rc = request_irq(irq, msm_rpm_ack_interrupt,
990 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
991 "rpm_drv", msm_rpm_ack_interrupt);
992 if (rc) {
993 pr_err("%s: failed to request irq %d: %d\n",
994 __func__, irq, rc);
995 return rc;
996 }
997
998 rc = irq_set_irq_wake(irq, 1);
999 if (rc) {
1000 pr_err("%s: failed to set wakeup irq %u: %d\n",
1001 __func__, irq, rc);
1002 return rc;
1003 }
1004
Praveen Chidambaram78499012011-11-01 17:15:17 -06001005 msm_rpm_populate_map(data);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -06001006
1007 return platform_driver_register(&msm_rpm_platform_driver);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001008}