/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/err.h>
#include <linux/platform_device.h>

#include <asm/mach/irq.h>

#include <mach/msm_iomap.h>
#include <mach/gpiomux.h>
#include <mach/mpm.h>
#include "gpio-msm-common.h"

#ifdef CONFIG_GPIO_MSM_V3
enum msm_tlmm_register {
	SDC4_HDRV_PULL_CTL = 0x0, /* NOT USED */
	SDC3_HDRV_PULL_CTL = 0x0, /* NOT USED */
	SDC2_HDRV_PULL_CTL = 0x2048,
	SDC1_HDRV_PULL_CTL = 0x2044,
};
#else
enum msm_tlmm_register {
	SDC4_HDRV_PULL_CTL = 0x20a0,
	SDC3_HDRV_PULL_CTL = 0x20a4,
	SDC2_HDRV_PULL_CTL = 0x0, /* NOT USED */
	SDC1_HDRV_PULL_CTL = 0x20a0,
};
#endif

struct tlmm_field_cfg {
	enum msm_tlmm_register reg;
	u8 off;
};

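/*
 * The tables below are indexed by enum msm_tlmm_hdrive_tgt and
 * enum msm_tlmm_pull_tgt respectively; each entry gives the control
 * register and the bit offset of the drive-strength (3-bit) or
 * pull (2-bit) field, as consumed by msm_tlmm_set_field() below.
 */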
static const struct tlmm_field_cfg tlmm_hdrv_cfgs[] = {
	{SDC4_HDRV_PULL_CTL, 6}, /* TLMM_HDRV_SDC4_CLK  */
	{SDC4_HDRV_PULL_CTL, 3}, /* TLMM_HDRV_SDC4_CMD  */
	{SDC4_HDRV_PULL_CTL, 0}, /* TLMM_HDRV_SDC4_DATA */
	{SDC3_HDRV_PULL_CTL, 6}, /* TLMM_HDRV_SDC3_CLK  */
	{SDC3_HDRV_PULL_CTL, 3}, /* TLMM_HDRV_SDC3_CMD  */
	{SDC3_HDRV_PULL_CTL, 0}, /* TLMM_HDRV_SDC3_DATA */
	{SDC2_HDRV_PULL_CTL, 6}, /* TLMM_HDRV_SDC2_CLK  */
	{SDC2_HDRV_PULL_CTL, 3}, /* TLMM_HDRV_SDC2_CMD  */
	{SDC2_HDRV_PULL_CTL, 0}, /* TLMM_HDRV_SDC2_DATA */
	{SDC1_HDRV_PULL_CTL, 6}, /* TLMM_HDRV_SDC1_CLK  */
	{SDC1_HDRV_PULL_CTL, 3}, /* TLMM_HDRV_SDC1_CMD  */
	{SDC1_HDRV_PULL_CTL, 0}, /* TLMM_HDRV_SDC1_DATA */
};

static const struct tlmm_field_cfg tlmm_pull_cfgs[] = {
	{SDC4_HDRV_PULL_CTL, 14}, /* TLMM_PULL_SDC4_CLK  */
	{SDC4_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC4_CMD  */
	{SDC4_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC4_DATA */
	{SDC3_HDRV_PULL_CTL, 14}, /* TLMM_PULL_SDC3_CLK  */
	{SDC3_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC3_CMD  */
	{SDC3_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC3_DATA */
	{SDC2_HDRV_PULL_CTL, 14}, /* TLMM_PULL_SDC2_CLK  */
	{SDC2_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC2_CMD  */
	{SDC2_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC2_DATA */
	{SDC1_HDRV_PULL_CTL, 13}, /* TLMM_PULL_SDC1_CLK  */
	{SDC1_HDRV_PULL_CTL, 11}, /* TLMM_PULL_SDC1_CMD  */
	{SDC1_HDRV_PULL_CTL, 9},  /* TLMM_PULL_SDC1_DATA */
};

/*
 * Optional architecture-specific IRQ extension hooks (e.g. the MPM).
 * All callbacks default to NULL and are invoked only when set.
 */
struct irq_chip msm_gpio_irq_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
	.irq_disable	= NULL,
};

/**
 * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
 *
 * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
 * keeping track of which gpios are unmasked as irq sources, we avoid
 * having to do __raw_readl calls on hundreds of iomapped registers each time
 * the summary interrupt fires in order to locate the active interrupts.
 *
 * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
 * as wakeup sources. When the device is suspended, interrupts which are
 * not wakeup sources are disabled.
 *
 * @dual_edge_irqs: a bitmap used to track which irqs are configured
 * as dual-edge, as this is not supported by the hardware and requires
 * some special handling in the driver.
 */
struct msm_gpio_dev {
	struct gpio_chip gpio_chip;
	DECLARE_BITMAP(enabled_irqs, NR_MSM_GPIOS);
	DECLARE_BITMAP(wake_irqs, NR_MSM_GPIOS);
	DECLARE_BITMAP(dual_edge_irqs, NR_MSM_GPIOS);
	struct irq_domain *domain;
};

static DEFINE_SPINLOCK(tlmm_lock);

static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
{
	return container_of(chip, struct msm_gpio_dev, gpio_chip);
}

static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	int rc;
	rc = __msm_gpio_get_inout(offset);
	mb();
	return rc;
}

static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
	__msm_gpio_set_inout(offset, val);
	mb();
}

static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_set_config_direction(offset, 1, 0);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
	return 0;
}

static int msm_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset,
				     int val)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_set_config_direction(offset, 0, val);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
	return 0;
}

#ifdef CONFIG_OF
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct msm_gpio_dev *g_dev = to_msm_gpio_dev(chip);
	struct irq_domain *domain = g_dev->domain;
	return irq_linear_revmap(domain, offset);
}

static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	return irq_data->hwirq;
}
#else
static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return MSM_GPIO_TO_INT(offset - chip->base);
}

static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
{
	return irq - MSM_GPIO_TO_INT(chip->base);
}
#endif

static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	return msm_gpiomux_get(chip->base + offset);
}

static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	msm_gpiomux_put(chip->base + offset);
}

static struct msm_gpio_dev msm_gpio = {
	.gpio_chip = {
		.label		  = "msmgpio",
		.base		  = 0,
		.ngpio		  = NR_MSM_GPIOS,
		.direction_input  = msm_gpio_direction_input,
		.direction_output = msm_gpio_direction_output,
		.get		  = msm_gpio_get,
		.set		  = msm_gpio_set,
		.to_irq		  = msm_gpio_to_irq,
		.request	  = msm_gpio_request,
		.free		  = msm_gpio_free,
	},
};

static void switch_mpm_config(struct irq_data *d, unsigned val)
{
	/* switch the configuration in the mpm as well */
	if (!msm_gpio_irq_extn.irq_set_type)
		return;

	if (val)
		msm_gpio_irq_extn.irq_set_type(d, IRQF_TRIGGER_FALLING);
	else
		msm_gpio_irq_extn.irq_set_type(d, IRQF_TRIGGER_RISING);
}

/* For dual-edge interrupts in software, since the hardware has no
 * such support:
 *
 * At appropriate moments, this function may be called to flip the polarity
 * settings of both-edge irq lines to try and catch the next edge.
 *
 * The attempt is considered successful if:
 * - the status bit goes high, indicating that an edge was caught, or
 * - the input value of the gpio doesn't change during the attempt.
 * If the value changes twice during the process, that would cause the first
 * test to fail but would force the second, as two opposite
 * transitions would cause a detection no matter the polarity setting.
 *
 * The do-loop tries to sledge-hammer closed the timing hole between
 * the initial value-read and the polarity-write - if the line value changes
 * during that window, an interrupt is lost, the new polarity setting is
 * incorrect, and the first success test will fail, causing a retry.
 *
 * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
 */
static void msm_gpio_update_dual_edge_pos(struct irq_data *d, unsigned gpio)
{
	int loop_limit = 100;
	unsigned val, val2, intstat;

	do {
		val = __msm_gpio_get_inout(gpio);
		__msm_gpio_set_polarity(gpio, val);
		val2 = __msm_gpio_get_inout(gpio);
		intstat = __msm_gpio_get_intr_status(gpio);
		if (intstat || val == val2) {
			switch_mpm_config(d, val);
			return;
		}
	} while (loop_limit-- > 0);
	pr_err("%s: dual-edge irq failed to stabilize, %#08x != %#08x\n",
	       __func__, val, val2);
}

static void msm_gpio_irq_ack(struct irq_data *d)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);

	__msm_gpio_set_intr_status(gpio);
	if (test_bit(gpio, msm_gpio.dual_edge_irqs))
		msm_gpio_update_dual_edge_pos(d, gpio);
	mb();
}

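/*
 * Masking disables the interrupt at the TLMM and clears the gpio's bit in
 * enabled_irqs, so the summary handler no longer polls its status register;
 * unmasking reverses both steps after clearing any stale status.
 */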
static void msm_gpio_irq_mask(struct irq_data *d)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_set_intr_cfg_enable(gpio, 0);
	__clear_bit(gpio, msm_gpio.enabled_irqs);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	if (msm_gpio_irq_extn.irq_mask)
		msm_gpio_irq_extn.irq_mask(d);
}

static void msm_gpio_irq_unmask(struct irq_data *d)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__set_bit(gpio, msm_gpio.enabled_irqs);
	__msm_gpio_set_intr_status(gpio);
	__msm_gpio_set_intr_cfg_enable(gpio, 1);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	if (msm_gpio_irq_extn.irq_unmask)
		msm_gpio_irq_extn.irq_unmask(d);
}

static void msm_gpio_irq_disable(struct irq_data *d)
{
	if (msm_gpio_irq_extn.irq_disable)
		msm_gpio_irq_extn.irq_disable(d);
}

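/*
 * Level and single-edge triggers are programmed directly into the TLMM
 * interrupt configuration. IRQ_TYPE_EDGE_BOTH has no hardware support, so
 * such gpios are flagged in dual_edge_irqs and emulated by flipping the
 * polarity in msm_gpio_update_dual_edge_pos() after each edge.
 */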
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
	unsigned long irq_flags;

	spin_lock_irqsave(&tlmm_lock, irq_flags);

	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
			__set_bit(gpio, msm_gpio.dual_edge_irqs);
		else
			__clear_bit(gpio, msm_gpio.dual_edge_irqs);
	} else {
		__irq_set_handler_locked(d->irq, handle_level_irq);
		__clear_bit(gpio, msm_gpio.dual_edge_irqs);
	}

	__msm_gpio_set_intr_cfg_type(gpio, flow_type);

	if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		msm_gpio_update_dual_edge_pos(d, gpio);

	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	if (msm_gpio_irq_extn.irq_set_type)
		msm_gpio_irq_extn.irq_set_type(d, flow_type);

	return 0;
}

/*
 * When the summary IRQ is raised, any number of GPIO lines may be high.
 * It is the job of the summary handler to find all those GPIO lines
 * which have been set as summary IRQ lines and which are triggered,
 * and to call their interrupt handlers.
 */
static irqreturn_t msm_summary_irq_handler(int irq, void *data)
{
	unsigned long i;
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	for (i = find_first_bit(msm_gpio.enabled_irqs, NR_MSM_GPIOS);
	     i < NR_MSM_GPIOS;
	     i = find_next_bit(msm_gpio.enabled_irqs, NR_MSM_GPIOS, i + 1)) {
		if (__msm_gpio_get_intr_status(i))
			generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
							   i));
	}

	chained_irq_exit(chip, desc);
	return IRQ_HANDLED;
}

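/*
 * The TLMM summary interrupt is armed as a wakeup source only while at least
 * one gpio irq is flagged for wake; wake_irqs records which gpios those are
 * so that suspend/resume can swap the enabled set for the wake set.
 */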
static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);

	if (on) {
		if (bitmap_empty(msm_gpio.wake_irqs, NR_MSM_GPIOS))
			irq_set_irq_wake(TLMM_MSM_SUMMARY_IRQ, 1);
		set_bit(gpio, msm_gpio.wake_irqs);
	} else {
		clear_bit(gpio, msm_gpio.wake_irqs);
		if (bitmap_empty(msm_gpio.wake_irqs, NR_MSM_GPIOS))
			irq_set_irq_wake(TLMM_MSM_SUMMARY_IRQ, 0);
	}

	if (msm_gpio_irq_extn.irq_set_wake)
		msm_gpio_irq_extn.irq_set_wake(d, on);

	return 0;
}

static struct irq_chip msm_gpio_irq_chip = {
	.name		= "msmgpio",
	.irq_mask	= msm_gpio_irq_mask,
	.irq_unmask	= msm_gpio_irq_unmask,
	.irq_ack	= msm_gpio_irq_ack,
	.irq_set_type	= msm_gpio_irq_set_type,
	.irq_set_wake	= msm_gpio_irq_set_wake,
	.irq_disable	= msm_gpio_irq_disable,
};

#ifdef CONFIG_PM
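/*
 * Across suspend, interrupt delivery is restricted to the wake set: suspend
 * disables every enabled gpio irq and enables only those in wake_irqs;
 * resume reverses the swap after reporting which wake gpios fired.
 */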
static int msm_gpio_suspend(void)
{
	unsigned long irq_flags;
	unsigned long i;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	for_each_set_bit(i, msm_gpio.enabled_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 0);

	for_each_set_bit(i, msm_gpio.wake_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 1);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
	return 0;
}

void msm_gpio_show_resume_irq(void)
{
	unsigned long irq_flags;
	int i, irq, intstat;

	if (!msm_show_resume_irq_mask)
		return;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	for_each_set_bit(i, msm_gpio.wake_irqs, NR_MSM_GPIOS) {
		intstat = __msm_gpio_get_intr_status(i);
		if (intstat) {
			irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
			pr_warning("%s: %d triggered\n",
				   __func__, irq);
		}
	}
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}

static void msm_gpio_resume(void)
{
	unsigned long irq_flags;
	unsigned long i;

	msm_gpio_show_resume_irq();

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	for_each_set_bit(i, msm_gpio.wake_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 0);

	for_each_set_bit(i, msm_gpio.enabled_irqs, NR_MSM_GPIOS)
		__msm_gpio_set_intr_cfg_enable(i, 1);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
#else
#define msm_gpio_suspend NULL
#define msm_gpio_resume NULL
#endif

static struct syscore_ops msm_gpio_syscore_ops = {
	.suspend = msm_gpio_suspend,
	.resume = msm_gpio_resume,
};

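/*
 * Read-modify-write of a 'width'-bit field at bit 'off' of the HDRV/PULL
 * control register named by configs[id], serialized against other TLMM
 * accesses by tlmm_lock.
 */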
static void msm_tlmm_set_field(const struct tlmm_field_cfg *configs,
			       unsigned id, unsigned width, unsigned val)
{
	unsigned long irqflags;
	u32 mask = (1 << width) - 1;
	u32 __iomem *reg = MSM_TLMM_BASE + configs[id].reg;
	u32 reg_val;

	spin_lock_irqsave(&tlmm_lock, irqflags);
	reg_val = __raw_readl(reg);
	reg_val &= ~(mask << configs[id].off);
	reg_val |= (val & mask) << configs[id].off;
	__raw_writel(reg_val, reg);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irqflags);
}

void msm_tlmm_set_hdrive(enum msm_tlmm_hdrive_tgt tgt, int drv_str)
{
	msm_tlmm_set_field(tlmm_hdrv_cfgs, tgt, 3, drv_str);
}
EXPORT_SYMBOL(msm_tlmm_set_hdrive);

void msm_tlmm_set_pull(enum msm_tlmm_pull_tgt tgt, int pull)
{
	msm_tlmm_set_field(tlmm_pull_cfgs, tgt, 2, pull);
}
EXPORT_SYMBOL(msm_tlmm_set_pull);

int gpio_tlmm_config(unsigned config, unsigned disable)
{
	unsigned gpio = GPIO_PIN(config);

	if (gpio >= NR_MSM_GPIOS)
		return -EINVAL;

	__gpio_tlmm_config(config);
	mb();

	return 0;
}
EXPORT_SYMBOL(gpio_tlmm_config);
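/*
 * Illustrative call from board/platform code (assuming the GPIO_CFG()
 * packing helper and GPIO_CFG_* flags from the mach-msm gpiomux headers;
 * gpio 42 is an arbitrary example):
 *
 *	gpio_tlmm_config(GPIO_CFG(42, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP,
 *				  GPIO_CFG_2MA), GPIO_CFG_ENABLE);
 */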

int msm_gpio_install_direct_irq(unsigned gpio, unsigned irq,
				unsigned int input_polarity)
{
	unsigned long irq_flags;

	if (gpio >= NR_MSM_GPIOS || irq >= NR_TLMM_MSM_DIR_CONN_IRQ)
		return -EINVAL;

	spin_lock_irqsave(&tlmm_lock, irq_flags);
	__msm_gpio_install_direct_irq(gpio, irq, input_polarity);
	mb();
	spin_unlock_irqrestore(&tlmm_lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(msm_gpio_install_direct_irq);

/*
 * This lock class tells lockdep that GPIO irqs are in a different
 * category than their parent, so it won't report false recursion.
 */
static struct lock_class_key msm_gpio_lock_class;

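/*
 * Probe registers the gpio_chip, wires every gpio irq to msm_gpio_irq_chip
 * (done here only in the legacy non-DT case; the DT path does it per mapping
 * in msm_gpio_irq_domain_map()), claims the TLMM summary interrupt, and
 * hooks up the suspend/resume syscore ops.
 */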
static int __devinit msm_gpio_probe(struct platform_device *pdev)
{
	int ret;
#ifndef CONFIG_OF
	int irq, i;
#endif
	msm_gpio.gpio_chip.dev = &pdev->dev;
	spin_lock_init(&tlmm_lock);
	bitmap_zero(msm_gpio.enabled_irqs, NR_MSM_GPIOS);
	bitmap_zero(msm_gpio.wake_irqs, NR_MSM_GPIOS);
	bitmap_zero(msm_gpio.dual_edge_irqs, NR_MSM_GPIOS);
	ret = gpiochip_add(&msm_gpio.gpio_chip);
	if (ret < 0)
		return ret;

#ifndef CONFIG_OF
	for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
		irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
		irq_set_lockdep_class(irq, &msm_gpio_lock_class);
		irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
					 handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
#endif
	ret = request_irq(TLMM_MSM_SUMMARY_IRQ, msm_summary_irq_handler,
			  IRQF_TRIGGER_HIGH, "msmgpio", NULL);
	if (ret) {
		pr_err("Request_irq failed for TLMM_MSM_SUMMARY_IRQ - %d\n",
		       ret);
		return ret;
	}
	register_syscore_ops(&msm_gpio_syscore_ops);
	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id msm_gpio_of_match[] __devinitdata = {
	{.compatible = "qcom,msm-gpio", },
	{ },
};
#endif

static int __devexit msm_gpio_remove(struct platform_device *pdev)
{
	int ret;

	unregister_syscore_ops(&msm_gpio_syscore_ops);
	ret = gpiochip_remove(&msm_gpio.gpio_chip);
	if (ret < 0)
		return ret;
	irq_set_handler(TLMM_MSM_SUMMARY_IRQ, NULL);

	return 0;
}

static struct platform_driver msm_gpio_driver = {
	.probe = msm_gpio_probe,
	.remove = __devexit_p(msm_gpio_remove),
	.driver = {
		.name = "msmgpio",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(msm_gpio_of_match),
	},
};

static void __exit msm_gpio_exit(void)
{
	platform_driver_unregister(&msm_gpio_driver);
}
module_exit(msm_gpio_exit);

static int __init msm_gpio_init(void)
{
	return platform_driver_register(&msm_gpio_driver);
}
postcore_initcall(msm_gpio_init);

#ifdef CONFIG_OF
static int msm_gpio_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec,
				     unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize != 2)
		return -EINVAL;

	/* hwirq value */
	*out_hwirq = intspec[0];

	/* irq flags */
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

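/*
 * Called as the linear domain creates each mapping: give the virq its
 * lockdep class, attach msm_gpio_irq_chip with a level handler by default
 * (msm_gpio_irq_set_type() switches edge gpios to handle_edge_irq), and
 * mark it valid so drivers can request it.
 */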
static int msm_gpio_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hwirq)
{
	irq_set_lockdep_class(irq, &msm_gpio_lock_class);
	irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
				 handle_level_irq);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static struct irq_domain_ops msm_gpio_irq_domain_ops = {
	.xlate = msm_gpio_irq_domain_xlate,
	.map = msm_gpio_irq_domain_map,
};

int __init msm_gpio_of_init(struct device_node *node,
			    struct device_node *parent)
{
	msm_gpio.domain = irq_domain_add_linear(node, NR_MSM_GPIOS,
			&msm_gpio_irq_domain_ops, &msm_gpio);
	if (!msm_gpio.domain) {
		WARN(1, "Cannot allocate irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}
#endif

MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("sysdev:msmgpio");