/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include "spm.h"
#include "spm_driver.h"

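/*
 * One entry per supported low-power mode: the mode identifier, whether
 * entering the mode notifies the RPM, and the start offset of the mode's
 * command sequence in the SPM sequence memory.
 */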
struct msm_spm_power_modes {
	uint32_t mode;
	bool notify_rpm;
	uint32_t start_addr;
};

struct msm_spm_device {
	struct msm_spm_driver_data reg_data;
	struct msm_spm_power_modes *modes;
	uint32_t num_modes;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
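/* When zero, a CPU may only program its own voltage in msm_spm_set_vdd(). */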
static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);

void msm_spm_allow_x_cpu_set_vdd(bool allowed)
{
	atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
}

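/*
 * Program the voltage level of the given CPU's rail through its SAW.
 * Returns -EIO for a cross-CPU request when such requests are disallowed.
 */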
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
	unsigned long flags;
	struct msm_spm_device *dev;
	int ret = -EIO;

	local_irq_save(flags);
	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
				unlikely(smp_processor_id() != cpu)) {
		goto set_vdd_x_cpu_bail;
	}

	dev = &per_cpu(msm_cpu_spm_device, cpu);
	ret = msm_spm_drv_set_vdd(&dev->reg_data, vlevel);

set_vdd_x_cpu_bail:
	local_irq_restore(flags);
	return ret;
}

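/*
 * Arm the SPM with the command sequence registered for (mode, notify_rpm),
 * or disable the SPM altogether for MSM_SPM_MODE_DISABLED. If no matching
 * mode is found, the sequence at offset 0 is used.
 */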
static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
		unsigned int mode, bool notify_rpm)
{
	uint32_t i;
	uint32_t start_addr = 0;
	int ret = -EINVAL;

	if (mode == MSM_SPM_MODE_DISABLED) {
		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
		for (i = 0; i < dev->num_modes; i++) {
			if ((dev->modes[i].mode == mode) &&
				(dev->modes[i].notify_rpm == notify_rpm)) {
				start_addr = dev->modes[i].start_addr;
				break;
			}
		}
		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data,
					start_addr);
	}
	return ret;
}

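/*
 * Copy each mode's command sequence into the SPM sequence memory, record
 * where each sequence starts, and flush the entries to hardware.
 */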
static int __init msm_spm_dev_init(struct msm_spm_device *dev,
		struct msm_spm_platform_data *data)
{
	int i, ret = -ENOMEM;
	uint32_t offset = 0;

	dev->num_modes = data->num_modes;
	dev->modes = kmalloc(
			sizeof(struct msm_spm_power_modes) * dev->num_modes,
			GFP_KERNEL);

	if (!dev->modes)
		goto spm_failed_malloc;

	ret = msm_spm_drv_init(&dev->reg_data, data);

	if (ret)
		goto spm_failed_init;

	for (i = 0; i < dev->num_modes; i++) {

		/* On success, ret is the size consumed by this sequence. */
		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
						data->modes[i].cmd, offset);
		if (ret < 0)
			goto spm_failed_init;

		dev->modes[i].mode = data->modes[i].mode;
		dev->modes[i].notify_rpm = data->modes[i].notify_rpm;
		dev->modes[i].start_addr = offset;
		offset += ret;
	}
	msm_spm_drv_flush_seq_entry(&dev->reg_data);
	return 0;

spm_failed_init:
	kfree(dev->modes);
spm_failed_malloc:
	return ret;
}

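/* Arm the calling CPU's own SPM; uses __get_cpu_var(), so the caller must
 * not be migrated while this runs.
 */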
int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_cpu_spm_device);
	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
}

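/*
 * Initialize one SPM device per possible CPU; data must hold at least
 * num_possible_cpus() entries, indexed by CPU number.
 */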
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
	unsigned int cpu;
	int ret = 0;

	BUG_ON((nr_devs < num_possible_cpus()) || !data);

	for_each_possible_cpu(cpu) {
		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
		ret = msm_spm_dev_init(dev, &data[cpu]);
		if (ret < 0) {
			pr_warn("%s(): failed for CPU %u, ret %d\n",
					__func__, cpu, ret);
			break;
		}
	}

	return ret;
}

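/*
 * Power on the voltage rail of a secondary CPU by writing the voltage
 * control register of that CPU's SAW directly. CPU0 and out-of-range
 * CPUs are rejected.
 */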
int msm_spm_turn_on_cpu_rail(unsigned int cpu)
{
	uint32_t val = 0;
	uint32_t timeout = 0;
	void *reg = NULL;
	/* SAW base addresses, indexed by CPU; CPU0 is not handled here. */
	void *saw_bases[] = {
		0,
		MSM_SAW1_BASE,
		MSM_SAW2_BASE,
		MSM_SAW3_BASE
	};

	if (cpu == 0 || cpu >= num_possible_cpus())
		return -EINVAL;

	reg = saw_bases[cpu];

	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
		val = 0xA4;
		reg += 0x14;
		timeout = 512;
	} else {
		return -ENOSYS;
	}

	writel_relaxed(val, reg);
	/* Make sure the write has completed before waiting for the rail. */
	mb();
	udelay(timeout);

	return 0;
}
EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);

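/* A dedicated SPM instance drives the L2 cache's power controller. */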
#if defined(CONFIG_MSM_L2_SPM)
static struct msm_spm_device msm_spm_l2_device;

int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	return msm_spm_dev_set_low_power_mode(
			&msm_spm_l2_device, mode, notify_rpm);
}

int __init msm_spm_l2_init(struct msm_spm_platform_data *data)
{
	return msm_spm_dev_init(&msm_spm_l2_device, data);
}
#endif