/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/io.h>
19#include <mach/msm_iomap.h>
20
21#include "spm.h"
22
23
/* Bit flags selectable through the debug_mask module parameter. */
enum {
	MSM_SPM_DEBUG_SHADOW = 1U << 0,	/* dump shadow registers after a mode change */
	MSM_SPM_DEBUG_VCTL = 1U << 1,	/* trace voltage-control requests */
};
28
/* Runtime-tunable debug output; accepts the MSM_SPM_DEBUG_* bit flags. */
static int msm_spm_debug_mask;
module_param_named(
	debug_mask, msm_spm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/* Value of the SAW_STS PMIC state field once a voltage change has settled. */
#define MSM_SPM_PMIC_STATE_IDLE 0
35
/*
 * Byte offsets of the SAW (SPM/AVS wrapper) hardware registers relative to
 * the block's base address, indexed by the MSM_SPM_REG_* enum from spm.h.
 */
static uint32_t msm_spm_reg_offsets[MSM_SPM_REG_NR] = {
	[MSM_SPM_REG_SAW_AVS_CTL] = 0x04,

	[MSM_SPM_REG_SAW_VCTL] = 0x08,
	[MSM_SPM_REG_SAW_STS] = 0x0C,
	[MSM_SPM_REG_SAW_CFG] = 0x10,

	[MSM_SPM_REG_SAW_SPM_CTL] = 0x14,
	[MSM_SPM_REG_SAW_SPM_SLP_TMR_DLY] = 0x18,
	[MSM_SPM_REG_SAW_SPM_WAKE_TMR_DLY] = 0x1C,

	[MSM_SPM_REG_SAW_SPM_PMIC_CTL] = 0x20,
	[MSM_SPM_REG_SAW_SLP_CLK_EN] = 0x24,
	[MSM_SPM_REG_SAW_SLP_HSFS_PRECLMP_EN] = 0x28,
	[MSM_SPM_REG_SAW_SLP_HSFS_POSTCLMP_EN] = 0x2C,

	[MSM_SPM_REG_SAW_SLP_CLMP_EN] = 0x30,
	[MSM_SPM_REG_SAW_SLP_RST_EN] = 0x34,
	[MSM_SPM_REG_SAW_SPM_MPM_CFG] = 0x38,
};
56
/* Per-CPU software state for one SAW (SPM/AVS wrapper) instance. */
struct msm_spm_device {
	void __iomem *reg_base_addr;	/* mapped base of the SAW block */
	uint32_t reg_shadow[MSM_SPM_REG_NR];	/* last value written per register */

	/* PMIC voltage levels programmed for the supported sleep states. */
	uint8_t awake_vlevel;
	uint8_t retention_vlevel;
	uint8_t collapse_vlevel;
	uint8_t retention_mid_vlevel;
	uint8_t collapse_mid_vlevel;

	uint32_t vctl_timeout_us;	/* max busy-wait for a voltage change */

	unsigned int low_power_mode;	/* last MSM_SPM_MODE_* programmed */
	bool notify_rpm;	/* whether the RPM handshake was left enabled */
	bool dirty;	/* shadow state changed since last mode programming */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_spm_devices);
/* Non-zero when one CPU is allowed to set another CPU's vdd (default: yes). */
static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
76
77/******************************************************************************
78 * Internal helper functions
79 *****************************************************************************/
80
81static inline void msm_spm_set_vctl(
82 struct msm_spm_device *dev, uint32_t vlevel)
83{
84 dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0xFF;
85 dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= vlevel;
86}
87
88static inline void msm_spm_set_spm_ctl(struct msm_spm_device *dev,
89 uint32_t rpm_bypass, uint32_t mode_encoding)
90{
91 dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x0F;
92 dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= rpm_bypass << 3;
93 dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= mode_encoding;
94}
95
96static inline void msm_spm_set_pmic_ctl(struct msm_spm_device *dev,
97 uint32_t awake_vlevel, uint32_t mid_vlevel, uint32_t sleep_vlevel)
98{
99 dev->reg_shadow[MSM_SPM_REG_SAW_SPM_PMIC_CTL] =
100 (mid_vlevel << 16) | (awake_vlevel << 8) | (sleep_vlevel);
101}
102
/*
 * Set the shadow copy of SAW_SLP_RST_EN (whole-register write).
 * Shadow-only; the caller flushes the register to hardware.
 */
static inline void msm_spm_set_slp_rst_en(
	struct msm_spm_device *dev, uint32_t slp_rst_en)
{
	dev->reg_shadow[MSM_SPM_REG_SAW_SLP_RST_EN] = slp_rst_en;
}
108
/*
 * Write one shadow register out to its SAW hardware register.
 * Uses __raw_writel, so no barrier is implied; callers that need ordering
 * against later code issue mb() themselves.
 */
static inline void msm_spm_flush_shadow(
	struct msm_spm_device *dev, unsigned int reg_index)
{
	__raw_writel(dev->reg_shadow[reg_index],
		dev->reg_base_addr + msm_spm_reg_offsets[reg_index]);
}
115
/*
 * Refresh one shadow register from its SAW hardware register
 * (unordered __raw_readl).
 */
static inline void msm_spm_load_shadow(
	struct msm_spm_device *dev, unsigned int reg_index)
{
	dev->reg_shadow[reg_index] = __raw_readl(dev->reg_base_addr +
		msm_spm_reg_offsets[reg_index]);
}
122
123static inline uint32_t msm_spm_get_sts_pmic_state(struct msm_spm_device *dev)
124{
125 return (dev->reg_shadow[MSM_SPM_REG_SAW_STS] >> 20) & 0x03;
126}
127
128static inline uint32_t msm_spm_get_sts_curr_pmic_data(
129 struct msm_spm_device *dev)
130{
131 return (dev->reg_shadow[MSM_SPM_REG_SAW_STS] >> 10) & 0xFF;
132}
133
134/******************************************************************************
135 * Public functions
136 *****************************************************************************/
/*
 * Program this CPU's SAW for the requested low power mode.
 *
 * @mode:       one of MSM_SPM_MODE_CLOCK_GATING / POWER_RETENTION /
 *              POWER_COLLAPSE; anything else is a fatal driver bug (BUG()).
 * @notify_rpm: when true, rpm_bypass is cleared so the sleep transition
 *              involves the RPM handshake.
 *
 * Returns 0.  Operates on the calling CPU's per-CPU device, so it
 * presumably runs with preemption disabled — TODO confirm at call sites.
 */
int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices);
	uint32_t rpm_bypass = notify_rpm ? 0x00 : 0x01;

	/* Skip the MMIO writes when nothing changed since last time. */
	if (mode == dev->low_power_mode && notify_rpm == dev->notify_rpm
		&& !dev->dirty)
		return 0;

	switch (mode) {
	case MSM_SPM_MODE_CLOCK_GATING:
		msm_spm_set_spm_ctl(dev, rpm_bypass, 0x00);
		msm_spm_set_slp_rst_en(dev, 0x00);
		break;

	case MSM_SPM_MODE_POWER_RETENTION:
		msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02);
		msm_spm_set_pmic_ctl(dev, dev->awake_vlevel,
			dev->retention_mid_vlevel, dev->retention_vlevel);
		msm_spm_set_slp_rst_en(dev, 0x00);
		break;

	case MSM_SPM_MODE_POWER_COLLAPSE:
		msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02);
		msm_spm_set_pmic_ctl(dev, dev->awake_vlevel,
			dev->collapse_mid_vlevel, dev->collapse_vlevel);
		msm_spm_set_slp_rst_en(dev, 0x01);
		break;

	default:
		BUG();
	}

	/* Push the updated shadow values to the hardware. */
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_PMIC_CTL);
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SLP_RST_EN);
	/* Ensure that the registers are written before returning */
	mb();

	/* Record the programmed state so identical requests short-circuit. */
	dev->low_power_mode = mode;
	dev->notify_rpm = notify_rpm;
	dev->dirty = false;

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
		int i;
		/* Dump every shadow register alongside its hardware offset. */
		for (i = 0; i < MSM_SPM_REG_NR; i++)
			pr_info("%s: reg %02x = 0x%08x\n", __func__,
				msm_spm_reg_offsets[i], dev->reg_shadow[i]);
	}

	return 0;
}
189
/*
 * Request PMIC voltage level @vlevel for @cpu through that CPU's SAW and
 * busy-wait for the PMIC state machine to settle.
 *
 * The wait is bounded by dev->vctl_timeout_us, polled in 10 us steps.
 * The whole transaction runs with local interrupts disabled.
 *
 * Returns 0 on success; -EIO when a cross-CPU request is disallowed,
 * when the PMIC does not reach the idle state in time, or when it
 * settles at a level other than the one requested.
 */
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
	unsigned long flags;
	struct msm_spm_device *dev;
	uint32_t timeout_us;

	local_irq_save(flags);

	/* Cross-CPU voltage changes may be administratively disabled. */
	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
		unlikely(smp_processor_id() != cpu)) {
		if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
			pr_info("%s: attempting to set vdd of cpu %u from "
				"cpu %u\n", __func__, cpu, smp_processor_id());
		goto set_vdd_x_cpu_bail;
	}

	dev = &per_cpu(msm_spm_devices, cpu);

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
		pr_info("%s: requesting cpu %u vlevel 0x%x\n",
			__func__, cpu, vlevel);

	/* Write the requested level into SAW_VCTL. */
	msm_spm_set_vctl(dev, vlevel);
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);

	/* Wait for PMIC state to return to idle or until timeout */
	timeout_us = dev->vctl_timeout_us;
	msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS);
	while (msm_spm_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) {
		if (!timeout_us)
			goto set_vdd_bail;

		/* Poll in 10 us steps; spend any remainder on the last step. */
		if (timeout_us > 10) {
			udelay(10);
			timeout_us -= 10;
		} else {
			udelay(timeout_us);
			timeout_us = 0;
		}
		msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS);
	}

	/* Idle is not enough: the PMIC must report the level we asked for. */
	if (msm_spm_get_sts_curr_pmic_data(dev) != vlevel)
		goto set_vdd_bail;

	/* Success: remember the new awake level and force reprogramming. */
	dev->awake_vlevel = vlevel;
	dev->dirty = true;

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
		pr_info("%s: cpu %u done, remaining timeout %uus\n",
			__func__, cpu, timeout_us);

	local_irq_restore(flags);
	return 0;

set_vdd_bail:
	pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n",
		__func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev));

set_vdd_x_cpu_bail:
	local_irq_restore(flags);
	return -EIO;
}
253
254void msm_spm_reinit(void)
255{
256 struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices);
257 int i;
258
259 for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++)
260 msm_spm_flush_shadow(dev, i);
261
262 /* Ensure that the registers are written before returning */
263 mb();
264}
265
266void msm_spm_allow_x_cpu_set_vdd(bool allowed)
267{
268 atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
269}
270
/*
 * One-time boot initialization of every possible CPU's SAW.
 *
 * @data:    per-CPU platform configuration array, indexed by CPU number;
 *           must provide at least num_possible_cpus() entries (BUG_ON
 *           otherwise).
 * @nr_devs: number of entries in @data.
 *
 * Copies the board-supplied register values and voltage levels into the
 * per-CPU shadow state, flushes the initialize-on-boot registers to
 * hardware, and marks each device dirty so the first
 * msm_spm_set_low_power_mode() call reprograms it.  Returns 0.
 */
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
	unsigned int cpu;

	BUG_ON(nr_devs < num_possible_cpus());
	for_each_possible_cpu(cpu) {
		struct msm_spm_device *dev = &per_cpu(msm_spm_devices, cpu);
		int i;

		/* Seed the shadow registers from the platform data. */
		dev->reg_base_addr = data[cpu].reg_base_addr;
		memcpy(dev->reg_shadow, data[cpu].reg_init_values,
			sizeof(data[cpu].reg_init_values));

		dev->awake_vlevel = data[cpu].awake_vlevel;
		dev->retention_vlevel = data[cpu].retention_vlevel;
		dev->collapse_vlevel = data[cpu].collapse_vlevel;
		dev->retention_mid_vlevel = data[cpu].retention_mid_vlevel;
		dev->collapse_mid_vlevel = data[cpu].collapse_mid_vlevel;
		dev->vctl_timeout_us = data[cpu].vctl_timeout_us;

		for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++)
			msm_spm_flush_shadow(dev, i);

		/* Ensure that the registers are written before returning */
		mb();

		/* dirty forces the first mode request to program the SAW. */
		dev->low_power_mode = MSM_SPM_MODE_CLOCK_GATING;
		dev->notify_rpm = false;
		dev->dirty = true;
	}

	return 0;
}