blob: b0fa3d2dd2f0a9f3de0e71bb41bcbd264cd70996 [file] [log] [blame]
Praveen Chidambaram78499012011-11-01 17:15:17 -06001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/irq.h>
24#include <linux/list.h>
25#include <linux/mutex.h>
26#include <linux/semaphore.h>
27#include <linux/spinlock.h>
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -060028#include <linux/device.h>
29#include <linux/platform_device.h>
Praveen Chidambaram78499012011-11-01 17:15:17 -060030#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#include <asm/hardware/gic.h>
32#include <mach/msm_iomap.h>
33#include <mach/rpm.h>
34
35/******************************************************************************
36 * Data type and structure definitions
37 *****************************************************************************/
38
/* Bookkeeping for a single in-flight request to the RPM. */
struct msm_rpm_request {
	struct msm_rpm_iv_pair *req;	/* id/value pairs sent to the RPM */
	int count;			/* number of entries in <req> */
	uint32_t *ctx_mask_ack;		/* out: context mask read from the ack page */
	uint32_t *sel_masks_ack;	/* out: selector masks read from the ack page */
	struct completion *done;	/* completed on ack; NULL in polling mode */
};
46
/*
 * Notification configuration for one context set.  The iv array holds two
 * logical halves of msm_rpm_sel_mask_size entries each (SEL_MASK_SIZE * 2
 * is the compile-time upper bound): the "configured" IVs followed by the
 * "registered" IVs — use the accessor macros below rather than indexing iv
 * directly.
 */
struct msm_rpm_notif_config {
	struct msm_rpm_iv_pair iv[SEL_MASK_SIZE * 2];
};

/* First half of iv[]: notification-configured id/value pairs. */
#define configured_iv(notif_cfg) ((notif_cfg)->iv)
/* Second half of iv[]: notification-registered id/value pairs. */
#define registered_iv(notif_cfg) ((notif_cfg)->iv + msm_rpm_sel_mask_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053
/* Number of selector-mask words actually used by this target. */
static uint32_t msm_rpm_sel_mask_size;
/* Target-specific platform data (register bases, id maps, irq numbers). */
static struct msm_rpm_platform_data msm_rpm_data;


/* Serializes task-context requests; msm_rpm_lock serializes noirq callers;
 * msm_rpm_irq_lock protects the shared request/notification state touched
 * from the ack interrupt handler. */
static DEFINE_MUTEX(msm_rpm_mutex);
static DEFINE_SPINLOCK(msm_rpm_lock);
static DEFINE_SPINLOCK(msm_rpm_irq_lock);

/* The single outstanding request, or NULL; guarded by msm_rpm_irq_lock. */
static struct msm_rpm_request *msm_rpm_request;
static struct msm_rpm_request msm_rpm_request_irq_mode;
static struct msm_rpm_request msm_rpm_request_poll_mode;

/* Registered notification listeners and per-context-set configurations. */
static LIST_HEAD(msm_rpm_notifications);
static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT];
static bool msm_rpm_init_notif_done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070069/******************************************************************************
70 * Internal functions
71 *****************************************************************************/
72
Praveen Chidambaram78499012011-11-01 17:15:17 -060073static inline unsigned int target_enum(unsigned int id)
74{
75 BUG_ON(id >= MSM_RPM_ID_LAST);
76 return msm_rpm_data.target_id[id].id;
77}
78
79static inline unsigned int target_status(unsigned int id)
80{
81 BUG_ON(id >= MSM_RPM_STATUS_ID_LAST);
82 return msm_rpm_data.target_status[id];
83}
84
85static inline unsigned int target_ctrl(unsigned int id)
86{
87 BUG_ON(id >= MSM_RPM_CTRL_LAST);
88 return msm_rpm_data.target_ctrl_id[id];
89}
90
/* Read one 32-bit word from an RPM shared-memory page; <reg> is a word
 * index (hence the * 4 byte offset), not a byte offset. */
static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg)
{
	return __raw_readl(msm_rpm_data.reg_base_addrs[page] + reg * 4);
}
95
/* Write one 32-bit word to an RPM shared-memory page; <reg> is a word
 * index.  Raw access — callers insert barriers (mb()) where ordering
 * against the RPM matters. */
static inline void msm_rpm_write(
	unsigned int page, unsigned int reg, uint32_t value)
{
	__raw_writel(value,
		msm_rpm_data.reg_base_addrs[page] + reg * 4);
}
102
103static inline void msm_rpm_read_contiguous(
104 unsigned int page, unsigned int reg, uint32_t *values, int count)
105{
106 int i;
107
108 for (i = 0; i < count; i++)
109 values[i] = msm_rpm_read(page, reg + i);
110}
111
112static inline void msm_rpm_write_contiguous(
113 unsigned int page, unsigned int reg, uint32_t *values, int count)
114{
115 int i;
116
117 for (i = 0; i < count; i++)
118 msm_rpm_write(page, reg + i, values[i]);
119}
120
/* Clear <count> consecutive words starting at <reg>. */
static inline void msm_rpm_write_contiguous_zeros(
	unsigned int page, unsigned int reg, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		msm_rpm_write(page, reg + idx, 0);
}
129
130static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id)
131{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600132 return (id >= MSM_RPM_ID_LAST) ? msm_rpm_data.sel_last + 1 :
133 msm_rpm_data.target_id[id].sel;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134}
135
136/*
137 * Note: the function does not clear the masks before filling them.
138 *
139 * Return value:
140 * 0: success
141 * -EINVAL: invalid id in <req> array
142 */
143static int msm_rpm_fill_sel_masks(
144 uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
145{
146 uint32_t sel;
147 int i;
148
149 for (i = 0; i < count; i++) {
150 sel = msm_rpm_map_id_to_sel(req[i].id);
151
Praveen Chidambaram78499012011-11-01 17:15:17 -0600152 if (sel > msm_rpm_data.sel_last) {
153 pr_err("%s(): RPM ID %d not defined for target\n",
154 __func__, req[i].id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 return -EINVAL;
Praveen Chidambaram78499012011-11-01 17:15:17 -0600156 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157
158 sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
159 msm_rpm_get_sel_mask(sel);
160 }
161
162 return 0;
163}
164
/* Ring the RPM's IPC register to tell it a new request has been written. */
static inline void msm_rpm_send_req_interrupt(void)
{
	__raw_writel(msm_rpm_data.ipc_rpm_val,
			msm_rpm_data.ipc_rpm_reg);
}
170
/*
 * Read and dispatch one acknowledgement from the RPM ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};

	/* Snapshot the acked context and selector masks first. */
	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0));
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
		sel_masks_ack, msm_rpm_sel_mask_size);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		/* Wake every listener whose selector masks overlap the ack. */
		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < msm_rpm_sel_mask_size; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		/* Clear the ack registers to re-arm for the next event. */
		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		/* Hand the acked masks and per-id values back to the waiter. */
		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
				target_enum(msm_rpm_request->req[i].id));

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		/* done is NULL for poll-mode requests. */
		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
241
/* Acknowledge an RPM error interrupt and bring the system down. */
static void msm_rpm_err_fatal(void)
{
	/* Tell RPM that we're handling the interrupt */
	__raw_writel(0x1, msm_rpm_data.ipc_rpm_reg);
	panic("RPM error fataled");
}
248
249static irqreturn_t msm_rpm_err_interrupt(int irq, void *dev_id)
250{
251 msm_rpm_err_fatal();
252 return IRQ_HANDLED;
253}
254
/* IRQ handler for RPM acknowledgements; processes the ack page under
 * <msm_rpm_irq_lock>. */
static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id)
{
	unsigned long flags;
	int rc;

	/* The handler is registered with its own address as dev_id. */
	if (dev_id != &msm_rpm_ack_interrupt)
		return IRQ_NONE;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	rc = msm_rpm_process_ack_interrupt();
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	return IRQ_HANDLED;
}
269
/*
 * Poll the GIC until the outstanding request has been acknowledged,
 * processing acks inline (the ack IRQ is masked on this path).
 *
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * allow_async_completion: when true, drop the irq lock around the delay so
 * the regular ack interrupt handler may complete the request concurrently.
 */
static void msm_rpm_busy_wait_for_request_completion(
	bool allow_async_completion)
{
	int rc;

	do {
		while (!gic_is_spi_pending(msm_rpm_data.irq_ack) &&
				msm_rpm_request) {
			if (allow_async_completion)
				spin_unlock(&msm_rpm_irq_lock);
			/* An error interrupt while polling is fatal. */
			if (gic_is_spi_pending(msm_rpm_data.irq_err))
				msm_rpm_err_fatal();
			gic_clear_spi_pending(msm_rpm_data.irq_err);
			udelay(1);
			if (allow_async_completion)
				spin_lock(&msm_rpm_irq_lock);
		}

		/* Completed asynchronously while we dropped the lock. */
		if (!msm_rpm_request)
			break;

		/* Non-zero rc means notification/spurious — keep waiting. */
		rc = msm_rpm_process_ack_interrupt();
		gic_clear_spi_pending(msm_rpm_data.irq_ack);
	} while (rc);
}
298
/* Issue one request to the RPM and sleep until it is acknowledged.
 * Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	int i;

	/* Interrupt-mode bookkeeping: ack arrives via the IRQ handler. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* Only one request may be outstanding at a time. */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	/* The ack must match what we requested, modulo the REJECTED bit. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
357
/* Issue one request to the RPM and busy-wait for the acknowledgement,
 * with the ack and error interrupts masked at the irq_chip level.
 * Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip, *err_chip;
	int i;

	/* Poll-mode bookkeeping: done == NULL, no completion is signalled. */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	/* Mask both RPM interrupts so acks are consumed only by polling. */
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));
	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	/* Drain any request started in interrupt mode before ours. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	/* Restore the interrupts in reverse order of masking. */
	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* The ack must match what we requested, modulo the REJECTED bit. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
435
/* Common worker for msm_rpm_set()/msm_rpm_set_noirq().
 * Upon return, the <req> array will contain values from the ack page.
 *
 * noirq: true selects the busy-wait path (caller must have irqs disabled),
 * false the sleeping interrupt-driven path.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 *   -ENODEV: RPM driver not initialized
 */
static int msm_rpm_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[SEL_MASK_SIZE] = {};
	int rc;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto set_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto set_common_exit;

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count);
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
	} else {
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count);
		mutex_unlock(&msm_rpm_mutex);
	}

set_common_exit:
	return rc;
}
474
/* Common worker for msm_rpm_clear()/msm_rpm_clear_noirq(): translate the
 * caller's selector masks into an INVALIDATE request and issue it.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENODEV: RPM driver not initialized.
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[SEL_MASK_SIZE];
	int rc;
	int i;

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	/* Each INVALIDATE_<i> register takes one selector-mask word. */
	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	/* The request itself is issued under the "invalidate" selector. */
	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(msm_rpm_data.sel_invalidate)] |=
		msm_rpm_get_sel_mask(msm_rpm_data.sel_invalidate);

	if (noirq) {
		unsigned long flags;

		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
						ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		/* Invalidate requests are never expected to be rejected. */
		BUG_ON(rc);
	} else {
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		BUG_ON(rc);
	}

clear_common_exit:
	return rc;
}
525
/*
 * Push a changed notification configuration to the RPM and cache it.
 * No-op when <new_cfg> equals <curr_cfg>.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_update_notification(uint32_t ctx,
	struct msm_rpm_notif_config *curr_cfg,
	struct msm_rpm_notif_config *new_cfg)
{
	unsigned int sel_notif = msm_rpm_data.sel_notification;

	if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) {
		uint32_t sel_masks[SEL_MASK_SIZE] = {};
		int rc;

		sel_masks[msm_rpm_get_sel_mask_reg(sel_notif)]
			|= msm_rpm_get_sel_mask(sel_notif);

		rc = msm_rpm_set_exclusive(ctx,
			sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
		BUG_ON(rc);

		/* Remember what the RPM now holds. */
		memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
	}
}
549
/*
 * One-time setup of every context set's notification configuration:
 * all "configured" bits on, no "registered" listeners yet.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_initialize_notification(void)
{
	struct msm_rpm_notif_config cfg;
	unsigned int ctx;
	int i;

	for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) {
		cfg = msm_rpm_notif_cfgs[ctx];

		for (i = 0; i < msm_rpm_sel_mask_size; i++) {
			configured_iv(&cfg)[i].id =
				MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i;
			configured_iv(&cfg)[i].value = ~0UL;

			registered_iv(&cfg)[i].id =
				MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i;
			registered_iv(&cfg)[i].value = 0;
		}

		msm_rpm_update_notification(ctx,
			&msm_rpm_notif_cfgs[ctx], &cfg);
	}
}
576
577/******************************************************************************
578 * Public functions
579 *****************************************************************************/
580
581int msm_rpm_local_request_is_outstanding(void)
582{
583 unsigned long flags;
584 int outstanding = 0;
585
586 if (!spin_trylock_irqsave(&msm_rpm_lock, flags))
587 goto local_request_is_outstanding_exit;
588
589 if (!spin_trylock(&msm_rpm_irq_lock))
590 goto local_request_is_outstanding_unlock;
591
592 outstanding = (msm_rpm_request != NULL);
593 spin_unlock(&msm_rpm_irq_lock);
594
595local_request_is_outstanding_unlock:
596 spin_unlock_irqrestore(&msm_rpm_lock, flags);
597
598local_request_is_outstanding_exit:
599 return outstanding;
600}
601
/*
 * Read the specified status registers and return their values.
 *
 * status: array of id-value pairs. Each <id> specifies a status register,
 *	i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will
 *	contain the value of the status register.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EBUSY: RPM is updating the status page; values across different
 *	registers may not be consistent
 *   -EINVAL: invalid id in <status> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count)
{
	uint32_t seq_begin;
	uint32_t seq_end;
	int rc;
	int i;

	/* Sequence counter brackets the reads to detect concurrent updates. */
	seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			target_status(MSM_RPM_STATUS_ID_SEQUENCE));

	for (i = 0; i < count; i++) {
		int target_status_id;

		if (status[i].id >= MSM_RPM_STATUS_ID_LAST) {
			pr_err("%s(): Status ID beyond limits\n", __func__);
			rc = -EINVAL;
			goto get_status_exit;
		}

		target_status_id = target_status(status[i].id);
		if (target_status_id >= MSM_RPM_STATUS_ID_LAST) {
			pr_err("%s(): Status id %d not defined for target\n",
					__func__,
					target_status_id);
			rc = -EINVAL;
			goto get_status_exit;
		}

		status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS,
				target_status_id);
	}

	seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS,
			target_status(MSM_RPM_STATUS_ID_SEQUENCE));

	/* Changed or odd sequence means the RPM was mid-update. */
	rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;

get_status_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_get_status);
658
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 *	There two contexts that a RPM driver client can use:
 *	MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values
 *	that are intended to take effect when the CPU is active,
 *	MSM_RPM_CTX_SET_0 should be used. For resource values that are
 *	intended to take effect when the CPU is not active,
 *	MSM_RPM_CTX_SET_SLEEP should be used.
 * req: array of id-value pairs. Each <id> specifies a RPM resource,
 *	i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested
 *	resource value.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENOSPC: request rejected
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_set_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_set);
687
/*
 * Issue a resource request to RPM to set resource values.
 *
 * Note: the function is similar to msm_rpm_set() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_set()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpm_set or msm_rpm_set_nosleep instead.");
	return msm_rpm_set_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_set_noirq);
703
/*
 * Issue a resource request to RPM to clear resource values.  Once the
 * values are cleared, the resources revert back to their default values
 * for this RPM master.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * ctx: the request's context.
 * req: array of id-value pairs. Each <id> specifies a RPM resource,
 *	i.e, one of MSM_RPM_ID_xxxx.  <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 */
int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpm_clear_common(ctx, req, count, false);
}
EXPORT_SYMBOL(msm_rpm_clear);
725
/*
 * Issue a resource request to RPM to clear resource values.
 *
 * Note: the function is similar to msm_rpm_clear() except that it must be
 *	called with interrupts masked. If possible, use msm_rpm_clear()
 *	instead, to maximize CPU throughput.
 */
int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpm_clear or msm_rpm_clear_nosleep instead.");
	return msm_rpm_clear_common(ctx, req, count, true);
}
EXPORT_SYMBOL(msm_rpm_clear_noirq);
741
/*
 * Register for RPM notification.  When the specified resources
 * change their status on RPM, RPM sends out notifications and the
 * driver will "up" the semaphore in struct msm_rpm_notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * Memory for <n> must not be freed until the notification is
 * unregistered.  Memory for <req> can be freed after this
 * function returns.
 *
 * n: the notification object.  Caller should initialize only the
 *	semaphore field.  When a notification arrives later, the
 *	semaphore will be "up"ed.
 * req: array of id-value pairs.  Each <id> specifies a status register,
 *	i.e, one of MSM_RPM_STATUS_ID_xxxx.  <value>'s are ignored.
 * count: number of id-value pairs in the array
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid id in <req> array
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_register_notification(struct msm_rpm_notification *n,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc;
	int i;

	INIT_LIST_HEAD(&n->list);
	rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count);
	if (rc)
		goto register_notification_exit;

	mutex_lock(&msm_rpm_mutex);

	/* Lazily initialize the notification configurations once. */
	if (!msm_rpm_init_notif_done) {
		msm_rpm_initialize_notification();
		msm_rpm_init_notif_done = true;
	}

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_add(&n->list, &msm_rpm_notifications);
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* Merge this listener's selectors into the registered set. */
	for (i = 0; i < msm_rpm_sel_mask_size; i++)
		registered_iv(&cfg)[i].value |= n->sel_masks[i];

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

register_notification_exit:
	return rc;
}
EXPORT_SYMBOL(msm_rpm_register_notification);
803
/*
 * Unregister a notification.
 *
 * Note: the function may sleep and must be called in a task context.
 *
 * n: the notification object that was registered previously.
 *
 * Return value:
 *   0: success
 *   -ENODEV: RPM driver not initialized
 */
int msm_rpm_unregister_notification(struct msm_rpm_notification *n)
{
	unsigned long flags;
	unsigned int ctx;
	struct msm_rpm_notif_config cfg;
	int rc = 0;
	int i;

	mutex_lock(&msm_rpm_mutex);
	ctx = MSM_RPM_CTX_SET_0;
	cfg = msm_rpm_notif_cfgs[ctx];

	/* Rebuild the registered set from scratch without <n>. */
	for (i = 0; i < msm_rpm_sel_mask_size; i++)
		registered_iv(&cfg)[i].value = 0;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	list_del(&n->list);
	list_for_each_entry(n, &msm_rpm_notifications, list)
		for (i = 0; i < msm_rpm_sel_mask_size; i++)
			registered_iv(&cfg)[i].value |= n->sel_masks[i];
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg);
	mutex_unlock(&msm_rpm_mutex);

	return rc;
}
EXPORT_SYMBOL(msm_rpm_unregister_notification);
843
/* Firmware version fields exposed via sysfs (populated elsewhere). */
static uint32_t fw_major, fw_minor, fw_build;

/* sysfs: report the RPM driver version taken from platform data. */
static ssize_t driver_version_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		msm_rpm_data.ver[0], msm_rpm_data.ver[1], msm_rpm_data.ver[2]);
}

/* sysfs: report the RPM firmware version. */
static ssize_t fw_version_show(struct kobject *kobj,
	struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n",
		fw_major, fw_minor, fw_build);
}

static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version);
static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version);

/* Attribute group created/removed in probe()/remove(). */
static struct attribute *driver_attributes[] = {
	&driver_version_attr.attr,
	&fw_version_attr.attr,
	NULL
};

static struct attribute_group driver_attr_group = {
	.attrs = driver_attributes,
};
872
873static int __devinit msm_rpm_probe(struct platform_device *pdev)
874{
875 return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group);
876}
877
878static int __devexit msm_rpm_remove(struct platform_device *pdev)
879{
880 sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group);
881 return 0;
882}
883
/*
 * Platform driver binding to the "msm_rpm" device; registered at the end of
 * msm_rpm_init() so the sysfs version attributes appear once init succeeds.
 */
static struct platform_driver msm_rpm_platform_driver = {
	.probe = msm_rpm_probe,
	.remove = __devexit_p(msm_rpm_remove),
	.driver = {
		.name = "msm_rpm",
		.owner = THIS_MODULE,
	},
};
892
Praveen Chidambaram78499012011-11-01 17:15:17 -0600893static void __init msm_rpm_populate_map(struct msm_rpm_platform_data *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600895 int i, j;
896 struct msm_rpm_map_data *src = NULL;
897 struct msm_rpm_map_data *dst = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898
Praveen Chidambaram78499012011-11-01 17:15:17 -0600899 for (i = 0; i < MSM_RPM_ID_LAST;) {
900 src = &data->target_id[i];
901 dst = &msm_rpm_data.target_id[i];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700902
Praveen Chidambaram78499012011-11-01 17:15:17 -0600903 dst->id = MSM_RPM_ID_LAST;
904 dst->sel = msm_rpm_data.sel_last + 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700905
Praveen Chidambaram78499012011-11-01 17:15:17 -0600906 /*
907 * copy the target specific id of the current and also of
908 * all the #count id's that follow the current.
909 * [MSM_RPM_ID_PM8921_S1_0] = { MSM_RPM_8960_ID_PM8921_S1_0,
910 * MSM_RPM_8960_SEL_PM8921_S1,
911 * 2},
912 * [MSM_RPM_ID_PM8921_S1_1] = { 0, 0, 0 },
913 * should translate to
914 * [MSM_RPM_ID_PM8921_S1_0] = { MSM_RPM_8960_ID_PM8921_S1_0,
915 * MSM_RPM_8960_SEL_PM8921,
916 * 2 },
917 * [MSM_RPM_ID_PM8921_S1_1] = { MSM_RPM_8960_ID_PM8921_S1_0 + 1,
918 * MSM_RPM_8960_SEL_PM8921,
919 * 0 },
920 */
921 for (j = 0; j < src->count; j++) {
922 dst = &msm_rpm_data.target_id[i + j];
923 dst->id = src->id + j;
924 dst->sel = src->sel;
925 }
926
927 i += (src->count) ? src->count : 1;
928 }
929
930 for (i = 0; i < MSM_RPM_STATUS_ID_LAST; i++) {
931 if (data->target_status[i] & MSM_RPM_STATUS_ID_VALID)
932 msm_rpm_data.target_status[i] &=
933 ~MSM_RPM_STATUS_ID_VALID;
934 else
935 msm_rpm_data.target_status[i] = MSM_RPM_STATUS_ID_LAST;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700936 }
937}
938
939int __init msm_rpm_init(struct msm_rpm_platform_data *data)
940{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700941 unsigned int irq;
942 int rc;
943
Praveen Chidambaram78499012011-11-01 17:15:17 -0600944 memcpy(&msm_rpm_data, data, sizeof(struct msm_rpm_platform_data));
945 msm_rpm_sel_mask_size = msm_rpm_data.sel_last / 32 + 1;
946 BUG_ON(SEL_MASK_SIZE < msm_rpm_sel_mask_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700947
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600948 fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600949 target_status(MSM_RPM_STATUS_ID_VERSION_MAJOR));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600950 fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600951 target_status(MSM_RPM_STATUS_ID_VERSION_MINOR));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600952 fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600953 target_status(MSM_RPM_STATUS_ID_VERSION_BUILD));
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600954 pr_info("%s: RPM firmware %u.%u.%u\n", __func__,
955 fw_major, fw_minor, fw_build);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700956
Praveen Chidambaram78499012011-11-01 17:15:17 -0600957 if (fw_major != msm_rpm_data.ver[0]) {
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600958 pr_err("%s: RPM version %u.%u.%u incompatible with "
959 "this driver version %u.%u.%u\n", __func__,
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -0600960 fw_major, fw_minor, fw_build,
Praveen Chidambaram78499012011-11-01 17:15:17 -0600961 msm_rpm_data.ver[0],
962 msm_rpm_data.ver[1],
963 msm_rpm_data.ver[2]);
Praveen Chidambaram99a6a5d2011-07-13 10:14:06 -0600964 return -EFAULT;
965 }
966
Praveen Chidambaram78499012011-11-01 17:15:17 -0600967 msm_rpm_write(MSM_RPM_PAGE_CTRL,
968 target_ctrl(MSM_RPM_CTRL_VERSION_MAJOR), msm_rpm_data.ver[0]);
969 msm_rpm_write(MSM_RPM_PAGE_CTRL,
970 target_ctrl(MSM_RPM_CTRL_VERSION_MINOR), msm_rpm_data.ver[1]);
971 msm_rpm_write(MSM_RPM_PAGE_CTRL,
972 target_ctrl(MSM_RPM_CTRL_VERSION_BUILD), msm_rpm_data.ver[2]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973
Praveen Chidambaram78499012011-11-01 17:15:17 -0600974 irq = data->irq_ack;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700975
976 rc = request_irq(irq, msm_rpm_ack_interrupt,
977 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
978 "rpm_drv", msm_rpm_ack_interrupt);
979 if (rc) {
980 pr_err("%s: failed to request irq %d: %d\n",
981 __func__, irq, rc);
982 return rc;
983 }
984
985 rc = irq_set_irq_wake(irq, 1);
986 if (rc) {
987 pr_err("%s: failed to set wakeup irq %u: %d\n",
988 __func__, irq, rc);
989 return rc;
990 }
991
Stephen Boydf61255e2012-02-24 14:31:09 -0800992 rc = request_irq(data->irq_err, msm_rpm_err_interrupt,
993 IRQF_TRIGGER_RISING, "rpm_err", NULL);
994 if (rc) {
995 pr_err("%s: failed to request error interrupt: %d\n",
996 __func__, rc);
997 return rc;
998 }
999
Praveen Chidambaram78499012011-11-01 17:15:17 -06001000 msm_rpm_populate_map(data);
Praveen Chidambaram043f4ce2011-08-02 09:37:59 -06001001
1002 return platform_driver_register(&msm_rpm_platform_driver);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001003}