/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
 *
 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);

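/*
 * 'master' counts the cpus currently inside ux500_enter_idle(). The
 * last cpu to enter (counter == num_online_cpus()) tries to take
 * 'master_lock' and, as the master, prepares the whole AP for the
 * retention state; the other cpu simply does WFI.
 */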
static inline int ux500_enter_idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	int this_cpu = smp_processor_id();
	bool recouple = false;

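	/*
	 * The local timer stops in the retention state; switch this
	 * cpu over to the broadcast timer before it goes down.
	 */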
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);

	if (atomic_inc_return(&master) == num_online_cpus()) {

		/* With this lock, we prevent the other cpu from exiting
		 * this function, entering it again and becoming the
		 * master */
		if (!spin_trylock(&master_lock))
			goto wfi;

		/* decouple the gic from the A9 cores */
		if (prcmu_gic_decouple()) {
			spin_unlock(&master_lock);
			goto out;
		}

		/* If an error occurs, we will have to recouple the gic
		 * manually */
		recouple = true;

		/* At this point, as the gic is decoupled, if the other
		 * cpu is in WFI, we have the guarantee it won't be woken
		 * up, so we can safely go to retention */
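		/* 'this_cpu ? 0 : 1' picks the other core; the ux500 is
		 * a dual-core (Cortex-A9) system */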
		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
			goto out;

		/* The prcmu will be in charge of watching the interrupts
		 * and waking up the cpus */
		if (prcmu_copy_gic_settings())
			goto out;

		/* Check that an interrupt did not occur in the
		 * meantime on the gic ... */
		if (prcmu_gic_pending_irq())
			goto out;

		/* ... and on the prcmu */
		if (prcmu_pending_irq())
			goto out;

		/* Go to the retention state; the prcmu will wait for the
		 * cpu to enter WFI, which is what happens after exiting
		 * this 'master' critical section */
		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
			goto out;

		/* When we switch to retention, the prcmu is in charge
		 * of recoupling the gic automatically */
		recouple = false;

		spin_unlock(&master_lock);
	}
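
	/*
	 * Both the master (after a successful retention setup) and the
	 * non-master cpus end up in WFI here; the prcmu only switches
	 * the AP to retention once both cores are in WFI.
	 */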
wfi:
	cpu_do_idle();
out:
	atomic_dec(&master);

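	/*
	 * We only get here with 'recouple' set when we were the master,
	 * decoupled the gic and then aborted: undo the decoupling by
	 * hand and release the master lock.
	 */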
	if (recouple) {
		prcmu_gic_recouple();
		spin_unlock(&master_lock);
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);

	return index;
}

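/*
 * Two states: WFI (index 0, the safe state) and "ApIdle", the
 * retention state entered via ux500_enter_idle(). exit_latency and
 * target_residency are in microseconds.
 */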
static struct cpuidle_driver ux500_idle_driver = {
	.name = "ux500_idle",
	.owner = THIS_MODULE,
	.en_core_tk_irqen = 1,
	.states = {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter = ux500_enter_idle,
			.exit_latency = 70,
			.target_residency = 260,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.name = "ApIdle",
			.desc = "ARM Retention",
		},
	},
	.safe_state_index = 0,
	.state_count = 2,
};

/*
 * For each cpu, set up the broadcast timer because we will
 * need to migrate the timers for the states >= ApIdle.
 */
static void ux500_setup_broadcast_timer(void *arg)
{
	int cpu = smp_processor_id();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}

int __init ux500_idle_init(void)
{
	int ret, cpu;
	struct cpuidle_device *device;

	/* Configure wake up reasons */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
			     PRCMU_WAKEUP(ABB));

	/*
	 * Configure the timer broadcast for each cpu; this must be
	 * done from the cpu context, so we use an smp cross call
	 * with 'on_each_cpu'.
	 */
	on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);

	ret = cpuidle_register_driver(&ux500_idle_driver);
	if (ret) {
		printk(KERN_ERR "failed to register ux500 idle driver\n");
		return ret;
	}

	for_each_online_cpu(cpu) {
		device = &per_cpu(ux500_cpuidle_device, cpu);
		device->cpu = cpu;
		ret = cpuidle_register_device(device);
		if (ret) {
			printk(KERN_ERR "Failed to register cpuidle "
			       "device for cpu%d\n", cpu);
			goto out_unregister;
		}
	}
out:
	return ret;

out_unregister:
	for_each_online_cpu(cpu) {
		device = &per_cpu(ux500_cpuidle_device, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&ux500_idle_driver);
	goto out;
}

device_initcall(ux500_idle_init);