/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include "spm.h"
#include "spm_driver.h"
struct msm_spm_power_modes {
	uint32_t mode;
	bool notify_rpm;
	uint32_t start_addr;
};

struct msm_spm_device {
	struct msm_spm_driver_data reg_data;
	struct msm_spm_power_modes *modes;
	uint32_t num_modes;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
static atomic_t msm_spm_set_vdd_x_cpu_allowed = ATOMIC_INIT(1);
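
/*
 * Allow or disallow one CPU to program the voltage level through another
 * CPU's SAW/SPM. When disallowed, msm_spm_set_vdd() only honours requests
 * issued from the target CPU itself.
 */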
void msm_spm_allow_x_cpu_set_vdd(bool allowed)
{
	atomic_set(&msm_spm_set_vdd_x_cpu_allowed, allowed ? 1 : 0);
}
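
/*
 * Request the voltage level 'vlevel' from the SPM instance that belongs to
 * 'cpu'. Returns -EIO if this is a cross-CPU request while such requests are
 * disallowed, otherwise the result of the register-level driver call.
 */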
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
	unsigned long flags;
	struct msm_spm_device *dev;
	int ret = -EIO;

	local_irq_save(flags);

	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
			unlikely(smp_processor_id() != cpu)) {
		goto set_vdd_x_cpu_bail;
	}

	dev = &per_cpu(msm_cpu_spm_device, cpu);
	ret = msm_spm_drv_set_vdd(&dev->reg_data, vlevel);

set_vdd_x_cpu_bail:
	local_irq_restore(flags);
	return ret;
}
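
/*
 * Select the low power mode this SPM should use. MSM_SPM_MODE_DISABLED
 * simply turns the SPM off; otherwise the (mode, notify_rpm) pair is looked
 * up and the start address of its command sequence is programmed. If no
 * entry matches, a start address of 0 is used.
 */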
static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
		unsigned int mode, bool notify_rpm)
{
	uint32_t i;
	uint32_t start_addr = 0;
	int ret = -EINVAL;

	if (mode == MSM_SPM_MODE_DISABLED) {
		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
		for (i = 0; i < dev->num_modes; i++) {
			if ((dev->modes[i].mode == mode) &&
				(dev->modes[i].notify_rpm == notify_rpm)) {
				start_addr = dev->modes[i].start_addr;
				break;
			}
		}
		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data,
					start_addr);
	}
	return ret;
}
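
/*
 * Set up one SPM instance from platform data: initialise the register-level
 * driver, copy each mode's command sequence into the sequencer while
 * recording its start offset, then flush the assembled sequence.
 */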
static int __init msm_spm_dev_init(struct msm_spm_device *dev,
		struct msm_spm_platform_data *data)
{
	int i, ret = -ENOMEM;
	uint32_t offset = 0;

	dev->num_modes = data->num_modes;
	dev->modes = kmalloc(
			sizeof(struct msm_spm_power_modes) * dev->num_modes,
			GFP_KERNEL);
	if (!dev->modes)
		goto spm_failed_malloc;

	ret = msm_spm_drv_init(&dev->reg_data, data);
	if (ret)
		goto spm_failed_init;

	for (i = 0; i < dev->num_modes; i++) {
		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
						data->modes[i].cmd, offset);
		if (ret < 0)
			goto spm_failed_init;

		dev->modes[i].mode = data->modes[i].mode;
		dev->modes[i].notify_rpm = data->modes[i].notify_rpm;
		dev->modes[i].start_addr = offset;
		offset += ret;
	}
	msm_spm_drv_flush_seq_entry(&dev->reg_data);
	return 0;

spm_failed_init:
	kfree(dev->modes);
spm_failed_malloc:
	return ret;
}
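
/* Reinitialise the SPM registers of every possible CPU. */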
void msm_spm_reinit(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		msm_spm_drv_reinit(&per_cpu(msm_cpu_spm_device.reg_data, cpu));
}
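
/* Set the low power mode of the calling CPU's own SPM instance. */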
int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_cpu_spm_device);

	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
}
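
/*
 * Boot-time initialisation: set up one SPM instance per possible CPU from
 * the platform data array, which must describe at least num_possible_cpus()
 * devices.
 */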
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
	unsigned int cpu;
	int ret = 0;

	BUG_ON((nr_devs < num_possible_cpus()) || !data);

	for_each_possible_cpu(cpu) {
		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);

		ret = msm_spm_dev_init(dev, &data[cpu]);
		if (ret < 0) {
			pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
					cpu, ret);
			break;
		}
	}

	return ret;
}
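
/*
 * Power up the rail of a secondary CPU by writing directly to its SAW.
 * The register offset, value and delay below are SoC-specific and only
 * known here for MSM8960/MSM8930/APQ8064; other targets get -ENOSYS.
 * CPU 0 and out-of-range CPUs are rejected with -EINVAL.
 */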
int msm_spm_turn_on_cpu_rail(unsigned int cpu)
{
	uint32_t val = 0;
	uint32_t timeout = 0;
	void *reg = NULL;
	void *saw_bases[] = {
		0,
		MSM_SAW1_BASE,
		MSM_SAW2_BASE,
		MSM_SAW3_BASE
	};

	if (cpu == 0 || cpu >= num_possible_cpus())
		return -EINVAL;

	reg = saw_bases[cpu];

	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
		val = 0xA4;
		reg += 0x14;
		timeout = 512;
	} else {
		return -ENOSYS;
	}

	writel_relaxed(val, reg);
	mb();
	udelay(timeout);

	return 0;
}
EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
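
/*
 * The L2 cache has its own SAW/SPM instance; when CONFIG_MSM_L2_SPM is
 * enabled it is driven through the same helpers as the per-CPU devices.
 */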
#if defined(CONFIG_MSM_L2_SPM)
static struct msm_spm_device msm_spm_l2_device;

int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	return msm_spm_dev_set_low_power_mode(
			&msm_spm_l2_device, mode, notify_rpm);
}

int __init msm_spm_l2_init(struct msm_spm_platform_data *data)
{
	return msm_spm_dev_init(&msm_spm_l2_device, data);
}

void msm_spm_l2_reinit(void)
{
	msm_spm_drv_reinit(&msm_spm_l2_device.reg_data);
}
#endif