/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>

#include "spm.h"
#include "spm_driver.h"

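/*
 * Each low-power mode maps to the start offset of its command sequence
 * within the SPM sequencer memory.
 */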
struct msm_spm_power_modes {
	uint32_t mode;
	bool notify_rpm;
	uint32_t start_addr;
};

struct msm_spm_device {
	struct msm_spm_driver_data reg_data;
	struct msm_spm_power_modes *modes;
	uint32_t num_modes;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);

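/*
 * Control whether one CPU may program another CPU's voltage rail.
 * When disallowed, msm_spm_set_vdd() only honors requests issued on
 * the target CPU itself.
 */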
void msm_spm_allow_x_cpu_set_vdd(bool allowed)
{
	atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
}

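/*
 * Program the voltage level on the SPM of the given CPU. Interrupts
 * are disabled around the check so that the current CPU cannot change
 * between the smp_processor_id() test and the register write.
 */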
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
	unsigned long flags;
	struct msm_spm_device *dev;
	int ret = -EIO;

	local_irq_save(flags);
	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
			unlikely(smp_processor_id() != cpu))
		goto set_vdd_x_cpu_bail;

	dev = &per_cpu(msm_cpu_spm_device, cpu);
	ret = msm_spm_drv_set_vdd(&dev->reg_data, vlevel);

set_vdd_x_cpu_bail:
	local_irq_restore(flags);
	return ret;
}

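/*
 * Select the sequencer start address registered for the requested
 * mode/notify_rpm pair, or disable the SPM for MSM_SPM_MODE_DISABLED.
 * An unregistered mode falls through with a start address of 0.
 */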
static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
		unsigned int mode, bool notify_rpm)
{
	uint32_t i;
	uint32_t start_addr = 0;
	int ret = -EINVAL;

	if (mode == MSM_SPM_MODE_DISABLED) {
		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
		for (i = 0; i < dev->num_modes; i++) {
			if ((dev->modes[i].mode == mode) &&
				(dev->modes[i].notify_rpm == notify_rpm)) {
				start_addr = dev->modes[i].start_addr;
				break;
			}
		}
		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data,
					start_addr);
	}
	return ret;
}

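/*
 * Copy each platform-supplied command sequence into the SPM sequencer
 * memory, recording the start offset of every mode so it can be looked
 * up later by msm_spm_dev_set_low_power_mode().
 */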
static int __init msm_spm_dev_init(struct msm_spm_device *dev,
		struct msm_spm_platform_data *data)
{
	int i, ret = -ENOMEM;
	uint32_t offset = 0;

	dev->num_modes = data->num_modes;
	dev->modes = kmalloc(
			sizeof(struct msm_spm_power_modes) * dev->num_modes,
			GFP_KERNEL);

	if (!dev->modes)
		goto spm_failed_malloc;

	ret = msm_spm_drv_init(&dev->reg_data, data);

	if (ret)
		goto spm_failed_init;

	for (i = 0; i < dev->num_modes; i++) {
		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
						data->modes[i].cmd, offset);
		if (ret < 0)
			goto spm_failed_init;

		dev->modes[i].mode = data->modes[i].mode;
		dev->modes[i].notify_rpm = data->modes[i].notify_rpm;
		dev->modes[i].start_addr = offset;
		offset += ret;	/* advance past the sequence just written */
	}
	msm_spm_drv_flush_seq_entry(&dev->reg_data);
	return 0;

spm_failed_init:
	kfree(dev->modes);
spm_failed_malloc:
	return ret;
}

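/*
 * Enter the given low-power mode on the calling CPU's SPM. Uses
 * __get_cpu_var(), so callers are expected to run with preemption
 * disabled.
 */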
int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_cpu_spm_device);

	/* TODO: Remove this after 8064 bring up */
	if (cpu_is_apq8064())
		return 0;

	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
}

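/*
 * Set up the per-CPU SPM devices from the platform data array; data[]
 * must supply at least num_possible_cpus() entries, as enforced by the
 * BUG_ON below.
 */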
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
	unsigned int cpu;
	int ret = 0;

	/* TODO: Remove this after 8064 bring up */
	if (cpu_is_apq8064())
		return 0;

	BUG_ON((nr_devs < num_possible_cpus()) || !data);

	for_each_possible_cpu(cpu) {
		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
		ret = msm_spm_dev_init(dev, &data[cpu]);
		if (ret < 0) {
			pr_warn("%s(): failed for CPU %u, ret %d\n", __func__,
					cpu, ret);
			break;
		}
	}

	return ret;
}

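/*
 * Power up the voltage rail of a secondary CPU by writing directly to
 * its SAW register space, then delay to let the rail settle. Only
 * CPU 1 on MSM8960/MSM8930 is handled here; CPU 0's rail is already on.
 */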
int msm_spm_turn_on_cpu_rail(unsigned int cpu)
{
	uint32_t val = 0;
	uint32_t timeout = 0;
	void __iomem *reg = NULL;

	if (cpu >= num_possible_cpus())
		return -EINVAL;

	switch (cpu) {
	case 1:
		reg = MSM_SAW1_BASE;
		break;
	case 0:
	default:
		return -EFAULT;
	}

	if (cpu_is_msm8960() || cpu_is_msm8930()) {
		/* control value, register offset and settling time (us)
		 * specific to these SoCs */
		val = 0xB0;
		reg += 0x14;
		timeout = 512;
	} else {
		return -ENOSYS;
	}

	writel_relaxed(val, reg);
	mb();	/* complete the write before starting the settling delay */
	udelay(timeout);

	return 0;
}
EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);

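/*
 * The L2 cache has its own SPM, modeled as a single device rather than
 * a per-CPU one.
 */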
#if defined(CONFIG_MSM_L2_SPM)
static struct msm_spm_device msm_spm_l2_device;

int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	return msm_spm_dev_set_low_power_mode(
			&msm_spm_l2_device, mode, notify_rpm);
}

int __init msm_spm_l2_init(struct msm_spm_platform_data *data)
{
	/* TODO: Remove this after 8064 bring up */
	if (cpu_is_apq8064())
		return 0;

	return msm_spm_dev_init(&msm_spm_l2_device, data);
}
#endif