Merge changes Idbb393f9,I616d1a27 into msm-3.4
* changes:
msm: timer: Add a stub for msm_timer_get_timer0_base
msm: iomap: Add mappings for the MPM PS_HOLD region
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index cb714c2..53193a1 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -105,6 +105,8 @@
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
+CONFIG_GENLOCK=y
+CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_QSEECOM=y
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index d068e0b..38f1d2b 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -2136,11 +2136,6 @@
help
Enables embedded trace collection on MSM8660
-config MSM_SLEEP_STATS
- bool "Enable exporting of MSM sleep stats to userspace"
- depends on CPU_IDLE
- default n
-
config MSM_SLEEP_STATS_DEVICE
bool "Enable exporting of MSM sleep device stats to userspace"
@@ -2474,6 +2469,19 @@
algorithm and the algorithm returns a frequency for the core which is
passed to the frequency change driver.
+config MSM_CPR
+ tristate "Use MSM CPR in S/W mode"
+ help
+ Enable CPR (core power reduction) in S/W mode, where the processor
+ gets the notification from the CPR block and programs the PMIC.
+
+config MSM_VP_REGULATOR
+ tristate "Use MSM PMIC8029 C2 regulator"
+ depends on ARCH_MSM8625
+ help
+ Enable MSM PMIC8029 C2 regulator support using APC_PLEVEL access
+ for MSMs like 8625.
+
config HAVE_ARCH_HAS_CURRENT_TIMER
bool
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 83e559f..f763d49 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -350,7 +350,6 @@
obj-$(CONFIG_ARCH_MSM9625) += gpiomux-v2.o gpiomux.o
-obj-$(CONFIG_MSM_SLEEP_STATS) += idle_stats.o
obj-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += idle_stats_device.o
obj-$(CONFIG_MSM_DCVS) += msm_dcvs_scm.o msm_dcvs.o msm_dcvs_idle.o
obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
@@ -379,3 +378,9 @@
obj-$(CONFIG_MSM_HSIC_SYSMON_TEST) += hsic_sysmon_test.o
obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
+obj-$(CONFIG_MSM_CPR) += msm_cpr.o
+obj-$(CONFIG_MSM_VP_REGULATOR) += msm_vp.o
+
+ifdef CONFIG_MSM_CPR
+obj-$(CONFIG_DEBUG_FS) += msm_cpr-debug.o
+endif
diff --git a/arch/arm/mach-msm/cpuidle.c b/arch/arm/mach-msm/cpuidle.c
index e4ec4d4..de97186 100644
--- a/arch/arm/mach-msm/cpuidle.c
+++ b/arch/arm/mach-msm/cpuidle.c
@@ -68,29 +68,6 @@
MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE},
};
-
-#ifdef CONFIG_MSM_SLEEP_STATS
-static DEFINE_PER_CPU(struct atomic_notifier_head, msm_cpuidle_notifiers);
-
-int msm_cpuidle_register_notifier(unsigned int cpu, struct notifier_block *nb)
-{
- struct atomic_notifier_head *head =
- &per_cpu(msm_cpuidle_notifiers, cpu);
-
- return atomic_notifier_chain_register(head, nb);
-}
-EXPORT_SYMBOL(msm_cpuidle_register_notifier);
-
-int msm_cpuidle_unregister_notifier(unsigned int cpu, struct notifier_block *nb)
-{
- struct atomic_notifier_head *head =
- &per_cpu(msm_cpuidle_notifiers, cpu);
-
- return atomic_notifier_chain_unregister(head, nb);
-}
-EXPORT_SYMBOL(msm_cpuidle_unregister_notifier);
-#endif
-
static int msm_cpuidle_enter(
struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)
{
@@ -98,17 +75,9 @@
int i = 0;
enum msm_pm_sleep_mode pm_mode;
struct cpuidle_state_usage *st_usage = NULL;
-#ifdef CONFIG_MSM_SLEEP_STATS
- struct atomic_notifier_head *head =
- &__get_cpu_var(msm_cpuidle_notifiers);
-#endif
local_irq_disable();
-#ifdef CONFIG_MSM_SLEEP_STATS
- atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_ENTER, NULL);
-#endif
-
#ifdef CONFIG_CPU_PM
cpu_pm_enter();
#endif
@@ -128,10 +97,6 @@
cpu_pm_exit();
#endif
-#ifdef CONFIG_MSM_SLEEP_STATS
- atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_EXIT, NULL);
-#endif
-
local_irq_enable();
return ret;
@@ -219,16 +184,3 @@
return 0;
}
-
-static int __init msm_cpuidle_early_init(void)
-{
-#ifdef CONFIG_MSM_SLEEP_STATS
- unsigned int cpu;
-
- for_each_possible_cpu(cpu)
- ATOMIC_INIT_NOTIFIER_HEAD(&per_cpu(msm_cpuidle_notifiers, cpu));
-#endif
- return 0;
-}
-
-early_initcall(msm_cpuidle_early_init);
diff --git a/arch/arm/mach-msm/idle_stats.c b/arch/arm/mach-msm/idle_stats.c
deleted file mode 100644
index f4d3a27..0000000
--- a/arch/arm/mach-msm/idle_stats.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/hrtimer.h>
-#include <linux/interrupt.h>
-#include <linux/ktime.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/version.h>
-#include <linux/sched.h>
-#include <asm/uaccess.h>
-
-#include "idle_stats.h"
-#include <mach/cpuidle.h>
-
-/******************************************************************************
- * Debug Definitions
- *****************************************************************************/
-
-enum {
- MSM_IDLE_STATS_DEBUG_API = BIT(0),
- MSM_IDLE_STATS_DEBUG_SIGNAL = BIT(1),
- MSM_IDLE_STATS_DEBUG_MIGRATION = BIT(2),
-};
-
-static int msm_idle_stats_debug_mask;
-module_param_named(
- debug_mask, msm_idle_stats_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
-);
-
-/******************************************************************************
- * Driver Definitions
- *****************************************************************************/
-
-#define MSM_IDLE_STATS_DRIVER_NAME "msm_idle_stats"
-
-static dev_t msm_idle_stats_dev_nr;
-static struct cdev msm_idle_stats_cdev;
-static struct class *msm_idle_stats_class;
-
-/******************************************************************************
- * Device Definitions
- *****************************************************************************/
-
-struct msm_idle_stats_device {
- unsigned int cpu;
- struct mutex mutex;
- struct notifier_block notifier;
-
- int64_t collection_expiration;
- struct msm_idle_stats stats;
- struct hrtimer timer;
-
- wait_queue_head_t wait_q;
- atomic_t collecting;
-};
-
-static DEFINE_SPINLOCK(msm_idle_stats_devs_lock);
-static DEFINE_PER_CPU(struct msm_idle_stats_device *, msm_idle_stats_devs);
-
-/******************************************************************************
- *
- *****************************************************************************/
-
-static inline int64_t msm_idle_stats_bound_interval(int64_t interval)
-{
- if (interval <= 0)
- return 1;
-
- if (interval > UINT_MAX)
- return UINT_MAX;
-
- return interval;
-}
-
-static enum hrtimer_restart msm_idle_stats_timer(struct hrtimer *timer)
-{
- struct msm_idle_stats_device *stats_dev;
- unsigned int cpu;
- int64_t now;
- int64_t interval;
-
- stats_dev = container_of(timer, struct msm_idle_stats_device, timer);
- cpu = get_cpu();
-
- if (cpu != stats_dev->cpu) {
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_MIGRATION)
- pr_info("%s: timer migrated from cpu%u to cpu%u\n",
- __func__, stats_dev->cpu, cpu);
-
- stats_dev->stats.event = MSM_IDLE_STATS_EVENT_TIMER_MIGRATED;
- goto timer_exit;
- }
-
- now = ktime_to_us(ktime_get());
- interval = now - stats_dev->stats.last_busy_start;
-
- if (stats_dev->stats.busy_timer > 0 &&
- interval >= stats_dev->stats.busy_timer - 1)
- stats_dev->stats.event =
- MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
- else
- stats_dev->stats.event =
- MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;
-
-timer_exit:
- atomic_set(&stats_dev->collecting, 0);
- wake_up_interruptible(&stats_dev->wait_q);
-
- put_cpu();
- return HRTIMER_NORESTART;
-}
-
-static void msm_idle_stats_pre_idle(struct msm_idle_stats_device *stats_dev)
-{
- int64_t now;
- int64_t interval;
-
- if (smp_processor_id() != stats_dev->cpu) {
- WARN_ON(1);
- return;
- }
-
- if (!atomic_read(&stats_dev->collecting))
- return;
-
- hrtimer_cancel(&stats_dev->timer);
-
- now = ktime_to_us(ktime_get());
- interval = now - stats_dev->stats.last_busy_start;
- interval = msm_idle_stats_bound_interval(interval);
-
- stats_dev->stats.busy_intervals[stats_dev->stats.nr_collected]
- = (__u32) interval;
- stats_dev->stats.last_idle_start = now;
-}
-
-static void msm_idle_stats_post_idle(struct msm_idle_stats_device *stats_dev)
-{
- int64_t now;
- int64_t interval;
- int64_t timer_interval;
- int rc;
-
- if (smp_processor_id() != stats_dev->cpu) {
- WARN_ON(1);
- return;
- }
-
- if (!atomic_read(&stats_dev->collecting))
- return;
-
- now = ktime_to_us(ktime_get());
- interval = now - stats_dev->stats.last_idle_start;
- interval = msm_idle_stats_bound_interval(interval);
-
- stats_dev->stats.idle_intervals[stats_dev->stats.nr_collected]
- = (__u32) interval;
- stats_dev->stats.nr_collected++;
- stats_dev->stats.last_busy_start = now;
-
- if (stats_dev->stats.nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS) {
- stats_dev->stats.event = MSM_IDLE_STATS_EVENT_COLLECTION_FULL;
- goto post_idle_collection_done;
- }
-
- timer_interval = stats_dev->collection_expiration - now;
- if (timer_interval <= 0) {
- stats_dev->stats.event =
- MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;
- goto post_idle_collection_done;
- }
-
- if (stats_dev->stats.busy_timer > 0 &&
- timer_interval > stats_dev->stats.busy_timer)
- timer_interval = stats_dev->stats.busy_timer;
-
- rc = hrtimer_start(&stats_dev->timer,
- ktime_set(0, timer_interval * 1000), HRTIMER_MODE_REL_PINNED);
- WARN_ON(rc);
-
- return;
-
-post_idle_collection_done:
- atomic_set(&stats_dev->collecting, 0);
- wake_up_interruptible(&stats_dev->wait_q);
-}
-
-static int msm_idle_stats_notified(struct notifier_block *nb,
- unsigned long val, void *v)
-{
- struct msm_idle_stats_device *stats_dev = container_of(
- nb, struct msm_idle_stats_device, notifier);
-
- if (val == MSM_CPUIDLE_STATE_EXIT)
- msm_idle_stats_post_idle(stats_dev);
- else
- msm_idle_stats_pre_idle(stats_dev);
-
- return 0;
-}
-
-static int msm_idle_stats_collect(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct msm_idle_stats_device *stats_dev;
- struct msm_idle_stats *stats;
- int rc;
-
- stats_dev = (struct msm_idle_stats_device *) filp->private_data;
- stats = &stats_dev->stats;
-
- rc = mutex_lock_interruptible(&stats_dev->mutex);
- if (rc) {
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
- pr_info("%s: interrupted while waiting on device "
- "mutex\n", __func__);
-
- rc = -EINTR;
- goto collect_exit;
- }
-
- if (atomic_read(&stats_dev->collecting)) {
- pr_err("%s: inconsistent state\n", __func__);
- rc = -EBUSY;
- goto collect_unlock_exit;
- }
-
- rc = copy_from_user(stats, (void *)arg, sizeof(*stats));
- if (rc) {
- rc = -EFAULT;
- goto collect_unlock_exit;
- }
-
- if (stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS ||
- stats->busy_timer > MSM_IDLE_STATS_MAX_TIMER ||
- stats->collection_timer > MSM_IDLE_STATS_MAX_TIMER) {
- rc = -EINVAL;
- goto collect_unlock_exit;
- }
-
- if (get_cpu() != stats_dev->cpu) {
- put_cpu();
- rc = -EACCES;
- goto collect_unlock_exit;
- }
-
- /*
- * When collection_timer == 0, stop collecting at the next
- * post idle.
- */
- stats_dev->collection_expiration =
- ktime_to_us(ktime_get()) + stats->collection_timer;
-
- /*
- * Enable collection before starting any timer.
- */
- atomic_set(&stats_dev->collecting, 1);
-
- /*
- * When busy_timer == 0, do not set any busy timer.
- */
- if (stats->busy_timer > 0) {
- rc = hrtimer_start(&stats_dev->timer,
- ktime_set(0, stats->busy_timer * 1000),
- HRTIMER_MODE_REL_PINNED);
- WARN_ON(rc);
- }
-
- put_cpu();
- if (wait_event_interruptible(stats_dev->wait_q,
- !atomic_read(&stats_dev->collecting))) {
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
- pr_info("%s: interrupted while waiting on "
- "collection\n", __func__);
-
- hrtimer_cancel(&stats_dev->timer);
- atomic_set(&stats_dev->collecting, 0);
-
- rc = -EINTR;
- goto collect_unlock_exit;
- }
-
- stats->return_timestamp = ktime_to_us(ktime_get());
-
- rc = copy_to_user((void *)arg, stats, sizeof(*stats));
- if (rc) {
- rc = -EFAULT;
- goto collect_unlock_exit;
- }
-
-collect_unlock_exit:
- mutex_unlock(&stats_dev->mutex);
-
-collect_exit:
- return rc;
-}
-
-static int msm_idle_stats_open(struct inode *inode, struct file *filp)
-{
- struct msm_idle_stats_device *stats_dev;
- int rc;
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: enter\n", __func__);
-
- rc = nonseekable_open(inode, filp);
- if (rc) {
- pr_err("%s: failed to set nonseekable\n", __func__);
- goto open_bail;
- }
-
- stats_dev = (struct msm_idle_stats_device *)
- kzalloc(sizeof(*stats_dev), GFP_KERNEL);
- if (!stats_dev) {
- pr_err("%s: failed to allocate device struct\n", __func__);
- rc = -ENOMEM;
- goto open_bail;
- }
-
- stats_dev->cpu = MINOR(inode->i_rdev);
- mutex_init(&stats_dev->mutex);
- stats_dev->notifier.notifier_call = msm_idle_stats_notified;
- hrtimer_init(&stats_dev->timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- stats_dev->timer.function = msm_idle_stats_timer;
- init_waitqueue_head(&stats_dev->wait_q);
- atomic_set(&stats_dev->collecting, 0);
-
- filp->private_data = stats_dev;
-
- /*
- * Make sure only one device exists per cpu.
- */
- spin_lock(&msm_idle_stats_devs_lock);
- if (per_cpu(msm_idle_stats_devs, stats_dev->cpu)) {
- spin_unlock(&msm_idle_stats_devs_lock);
- rc = -EBUSY;
- goto open_free_bail;
- }
-
- per_cpu(msm_idle_stats_devs, stats_dev->cpu) = stats_dev;
- spin_unlock(&msm_idle_stats_devs_lock);
-
- rc = msm_cpuidle_register_notifier(stats_dev->cpu,
- &stats_dev->notifier);
- if (rc) {
- pr_err("%s: failed to register idle notification\n", __func__);
- goto open_null_bail;
- }
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: done\n", __func__);
- return 0;
-
-open_null_bail:
- spin_lock(&msm_idle_stats_devs_lock);
- per_cpu(msm_idle_stats_devs, stats_dev->cpu) = NULL;
- spin_unlock(&msm_idle_stats_devs_lock);
-
-open_free_bail:
- kfree(stats_dev);
-
-open_bail:
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: exit, %d\n", __func__, rc);
- return rc;
-}
-
-static int msm_idle_stats_release(struct inode *inode, struct file *filp)
-{
- struct msm_idle_stats_device *stats_dev;
- int rc;
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: enter\n", __func__);
-
- stats_dev = (struct msm_idle_stats_device *) filp->private_data;
- rc = msm_cpuidle_unregister_notifier(stats_dev->cpu,
- &stats_dev->notifier);
- WARN_ON(rc);
-
- spin_lock(&msm_idle_stats_devs_lock);
- per_cpu(msm_idle_stats_devs, stats_dev->cpu) = NULL;
- spin_unlock(&msm_idle_stats_devs_lock);
- filp->private_data = NULL;
-
- hrtimer_cancel(&stats_dev->timer);
- kfree(stats_dev);
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: done\n", __func__);
- return 0;
-}
-
-static long msm_idle_stats_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- int rc;
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: enter\n", __func__);
-
- switch (cmd) {
- case MSM_IDLE_STATS_IOC_COLLECT:
- rc = msm_idle_stats_collect(filp, cmd, arg);
- break;
-
- default:
- rc = -ENOTTY;
- break;
- }
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: exit, %d\n", __func__, rc);
- return rc;
-}
-
-/******************************************************************************
- *
- *****************************************************************************/
-
-static const struct file_operations msm_idle_stats_fops = {
- .owner = THIS_MODULE,
- .open = msm_idle_stats_open,
- .release = msm_idle_stats_release,
- .unlocked_ioctl = msm_idle_stats_ioctl,
-};
-
-static int __init msm_idle_stats_init(void)
-{
- unsigned int nr_cpus = num_possible_cpus();
- struct device *dev;
- int rc;
- int i;
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: enter\n", __func__);
-
- rc = alloc_chrdev_region(&msm_idle_stats_dev_nr,
- 0, nr_cpus, MSM_IDLE_STATS_DRIVER_NAME);
- if (rc) {
- pr_err("%s: failed to allocate device number, rc %d\n",
- __func__, rc);
- goto init_bail;
- }
-
- msm_idle_stats_class = class_create(THIS_MODULE,
- MSM_IDLE_STATS_DRIVER_NAME);
- if (IS_ERR(msm_idle_stats_class)) {
- pr_err("%s: failed to create device class\n", __func__);
- rc = -ENOMEM;
- goto init_unreg_bail;
- }
-
- for (i = 0; i < nr_cpus; i++) {
- dev = device_create(msm_idle_stats_class, NULL,
- msm_idle_stats_dev_nr + i, NULL,
- MSM_IDLE_STATS_DRIVER_NAME "%d", i);
-
- if (!dev) {
- pr_err("%s: failed to create device %d\n",
- __func__, i);
- rc = -ENOMEM;
- goto init_remove_bail;
- }
- }
-
- cdev_init(&msm_idle_stats_cdev, &msm_idle_stats_fops);
- msm_idle_stats_cdev.owner = THIS_MODULE;
-
- /*
- * Call cdev_add() last, after everything else is initialized and
- * the driver is ready to accept system calls.
- */
- rc = cdev_add(&msm_idle_stats_cdev, msm_idle_stats_dev_nr, nr_cpus);
- if (rc) {
- pr_err("%s: failed to register char device, rc %d\n",
- __func__, rc);
- goto init_remove_bail;
- }
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: done\n", __func__);
- return 0;
-
-init_remove_bail:
- for (i = i - 1; i >= 0; i--)
- device_destroy(
- msm_idle_stats_class, msm_idle_stats_dev_nr + i);
-
- class_destroy(msm_idle_stats_class);
-
-init_unreg_bail:
- unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);
-
-init_bail:
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: exit, %d\n", __func__, rc);
- return rc;
-}
-
-static void __exit msm_idle_stats_exit(void)
-{
- unsigned int nr_cpus = num_possible_cpus();
- int i;
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: enter\n", __func__);
-
- cdev_del(&msm_idle_stats_cdev);
-
- for (i = nr_cpus - 1; i >= 0; i--)
- device_destroy(
- msm_idle_stats_class, msm_idle_stats_dev_nr + i);
-
- class_destroy(msm_idle_stats_class);
- unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);
-
- if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
- pr_info("%s: done\n", __func__);
-}
-
-module_init(msm_idle_stats_init);
-module_exit(msm_idle_stats_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("idle stats driver");
-MODULE_VERSION("1.0");
diff --git a/arch/arm/mach-msm/idle_stats.h b/arch/arm/mach-msm/idle_stats.h
deleted file mode 100644
index 6c8db1e..0000000
--- a/arch/arm/mach-msm/idle_stats.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ARCH_ARM_MACH_MSM_IDLE_STATS_H
-#define __ARCH_ARM_MACH_MSM_IDLE_STATS_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-enum msm_idle_stats_event {
- MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED = 1,
- MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED = 2,
- MSM_IDLE_STATS_EVENT_COLLECTION_FULL = 3,
- MSM_IDLE_STATS_EVENT_TIMER_MIGRATED = 4,
-};
-
-/*
- * All time, timer, and time interval values are in units of
- * microseconds unless stated otherwise.
- */
-#define MSM_IDLE_STATS_NR_MAX_INTERVALS 100
-#define MSM_IDLE_STATS_MAX_TIMER 1000000
-
-struct msm_idle_stats {
- __u32 busy_timer;
- __u32 collection_timer;
-
- __u32 busy_intervals[MSM_IDLE_STATS_NR_MAX_INTERVALS];
- __u32 idle_intervals[MSM_IDLE_STATS_NR_MAX_INTERVALS];
- __u32 nr_collected;
- __s64 last_busy_start;
- __s64 last_idle_start;
-
- enum msm_idle_stats_event event;
- __s64 return_timestamp;
-};
-
-#define MSM_IDLE_STATS_IOC_MAGIC 0xD8
-#define MSM_IDLE_STATS_IOC_COLLECT \
- _IOWR(MSM_IDLE_STATS_IOC_MAGIC, 1, struct msm_idle_stats)
-
-#endif /* __ARCH_ARM_MACH_MSM_IDLE_STATS_H */
diff --git a/arch/arm/mach-msm/include/mach/cpuidle.h b/arch/arm/mach-msm/include/mach/cpuidle.h
index 8566e7f..af773a0 100644
--- a/arch/arm/mach-msm/include/mach/cpuidle.h
+++ b/arch/arm/mach-msm/include/mach/cpuidle.h
@@ -37,23 +37,4 @@
static inline int msm_cpuidle_init(void) { return -ENOSYS; }
#endif
-#ifdef CONFIG_MSM_SLEEP_STATS
-enum {
- MSM_CPUIDLE_STATE_ENTER,
- MSM_CPUIDLE_STATE_EXIT
-};
-
-int msm_cpuidle_register_notifier(unsigned int cpu,
- struct notifier_block *nb);
-int msm_cpuidle_unregister_notifier(unsigned int cpu,
- struct notifier_block *nb);
-#else
-static inline int msm_cpuidle_register_notifier(unsigned int cpu,
- struct notifier_block *nb)
-{ return -ENODEV; }
-static inline int msm_cpuidle_unregister_notifier(unsigned int cpu,
- struct notifier_block *nb)
-{ return -ENODEV; }
-#endif
-
#endif /* __ARCH_ARM_MACH_MSM_CPUIDLE_H */
diff --git a/arch/arm/mach-msm/msm_cpr-debug.c b/arch/arm/mach-msm/msm_cpr-debug.c
new file mode 100644
index 0000000..723423c
--- /dev/null
+++ b/arch/arm/mach-msm/msm_cpr-debug.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+struct msm_cpr_debug_device {
+ struct mutex debug_mutex;
+ struct dentry *dir;
+ int addr_offset;
+ void __iomem *base;
+};
+
+static inline
+void write_reg(struct msm_cpr_debug_device *cpr, u32 value)
+{
+ writel_relaxed(value, cpr->base + cpr->addr_offset);
+}
+
+static inline u32 read_reg(struct msm_cpr_debug_device *cpr)
+{
+ return readl_relaxed(cpr->base + cpr->addr_offset);
+}
+
+static bool msm_cpr_debug_addr_is_valid(int addr)
+{
+ if (addr < 0 || addr > 0x15C) {
+ pr_err("CPR register address is invalid: %d\n", addr);
+ return false;
+ }
+ return true;
+}
+
+static int msm_cpr_debug_data_set(void *data, u64 val)
+{
+ struct msm_cpr_debug_device *debugdev = data;
+ uint32_t reg = val;
+
+ mutex_lock(&debugdev->debug_mutex);
+
+ if (msm_cpr_debug_addr_is_valid(debugdev->addr_offset))
+ write_reg(debugdev, reg);
+
+ mutex_unlock(&debugdev->debug_mutex);
+ return 0;
+}
+
+static int msm_cpr_debug_data_get(void *data, u64 *val)
+{
+ struct msm_cpr_debug_device *debugdev = data;
+ uint32_t reg;
+
+ mutex_lock(&debugdev->debug_mutex);
+
+ if (msm_cpr_debug_addr_is_valid(debugdev->addr_offset)) {
+ reg = read_reg(debugdev);
+ *val = reg;
+ }
+ mutex_unlock(&debugdev->debug_mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_data_fops, msm_cpr_debug_data_get,
+ msm_cpr_debug_data_set, "0x%02llX\n");
+
+static int msm_cpr_debug_addr_set(void *data, u64 val)
+{
+ struct msm_cpr_debug_device *debugdev = data;
+
+ if (msm_cpr_debug_addr_is_valid(val)) {
+ mutex_lock(&debugdev->debug_mutex);
+ debugdev->addr_offset = val;
+ mutex_unlock(&debugdev->debug_mutex);
+ }
+
+ return 0;
+}
+
+static int msm_cpr_debug_addr_get(void *data, u64 *val)
+{
+ struct msm_cpr_debug_device *debugdev = data;
+
+ mutex_lock(&debugdev->debug_mutex);
+
+ if (msm_cpr_debug_addr_is_valid(debugdev->addr_offset))
+ *val = debugdev->addr_offset;
+
+ mutex_unlock(&debugdev->debug_mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_addr_fops, msm_cpr_debug_addr_get,
+ msm_cpr_debug_addr_set, "0x%03llX\n");
+
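+/*
+ * Illustrative usage note (an assumption for clarity, not part of the
+ * original change): with CONFIG_DEBUG_FS enabled, msm_cpr_debug_init()
+ * creates a "cpr-debug" directory with "address" and "data" files. With
+ * debugfs mounted at the usual /sys/kernel/debug, a typical sequence
+ * from userspace would look like:
+ *
+ *   echo 0x90 > /sys/kernel/debug/cpr-debug/address   # select RBCPR_CTL
+ *   cat /sys/kernel/debug/cpr-debug/data              # read the register
+ *
+ * The 0x90 offset is taken from the RBCPR_CTL define in msm_cpr.h.
+ */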
+int msm_cpr_debug_init(void *data)
+{
+ char *name = "cpr-debug";
+ struct msm_cpr_debug_device *debugdev;
+ struct dentry *dir;
+ struct dentry *temp;
+ int rc;
+
+ debugdev = kzalloc(sizeof(struct msm_cpr_debug_device), GFP_KERNEL);
+ if (debugdev == NULL) {
+ pr_err("kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ dir = debugfs_create_dir(name, NULL);
+ if (dir == NULL || IS_ERR(dir)) {
+ pr_err("debugfs_create_dir failed: rc=%ld\n", PTR_ERR(dir));
+ rc = dir ? PTR_ERR(dir) : -ENOMEM;
+ goto dir_error;
+ }
+
+ temp = debugfs_create_file("address", S_IRUGO | S_IWUSR, dir, debugdev,
+ &debug_addr_fops);
+ if (temp == NULL || IS_ERR(temp)) {
+ pr_err("debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
+ rc = temp ? PTR_ERR(temp) : -ENOMEM;
+ goto file_error;
+ }
+
+ temp = debugfs_create_file("data", S_IRUGO | S_IWUSR, dir, debugdev,
+ &debug_data_fops);
+ if (temp == NULL || IS_ERR(temp)) {
+ pr_err("debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
+ rc = temp ? PTR_ERR(temp) : -ENOMEM;
+ goto file_error;
+ }
+ debugdev->base = data;
+ debugdev->addr_offset = -1;
+ debugdev->dir = dir;
+ mutex_init(&debugdev->debug_mutex);
+
+ return 0;
+
+file_error:
+ debugfs_remove_recursive(dir);
+dir_error:
+ kfree(debugdev);
+
+ return rc;
+}
diff --git a/arch/arm/mach-msm/msm_cpr.c b/arch/arm/mach-msm/msm_cpr.c
new file mode 100644
index 0000000..f4272f3
--- /dev/null
+++ b/arch/arm/mach-msm/msm_cpr.c
@@ -0,0 +1,861 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/cpufreq.h>
+#include <linux/iopoll.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/irqs.h>
+
+#include "msm_cpr.h"
+
+#define MODULE_NAME "msm-cpr"
+
+/* Need platform device handle for suspend and resume APIs */
+static struct platform_device *cpr_pdev;
+
+struct msm_cpr {
+ int curr_osc;
+ int cpr_mode;
+ int prev_mode;
+ uint32_t floor;
+ uint32_t ceiling;
+ void __iomem *base;
+ unsigned int irq;
+ struct mutex cpr_mutex;
+ struct regulator *vreg_cx;
+ const struct msm_cpr_config *config;
+ struct notifier_block freq_transition;
+ struct msm_cpr_vp_data *vp;
+};
+
+/* Need to maintain state data for suspend and resume APIs */
+static struct msm_cpr_reg cpr_save_state;
+
+static inline
+void cpr_write_reg(struct msm_cpr *cpr, u32 offset, u32 value)
+{
+ writel_relaxed(value, cpr->base + offset);
+}
+
+static inline u32 cpr_read_reg(struct msm_cpr *cpr, u32 offset)
+{
+ return readl_relaxed(cpr->base + offset);
+}
+
+static
+void cpr_modify_reg(struct msm_cpr *cpr, u32 offset, u32 mask, u32 value)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(cpr->base + offset);
+ reg_val &= ~mask;
+ reg_val |= value;
+ writel_relaxed(reg_val, cpr->base + offset);
+}
+
+#ifdef DEBUG
+static void cpr_regs_dump_all(struct msm_cpr *cpr)
+{
+ pr_debug("RBCPR_GCNT_TARGET(%d): 0x%x\n",
+ cpr->curr_osc, readl_relaxed(cpr->base +
+ RBCPR_GCNT_TARGET(cpr->curr_osc)));
+ pr_debug("RBCPR_TIMER_INTERVAL: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_TIMER_INTERVAL));
+ pr_debug("RBIF_TIMER_ADJUST: 0x%x\n",
+ readl_relaxed(cpr->base + RBIF_TIMER_ADJUST));
+ pr_debug("RBIF_LIMIT: 0x%x\n",
+ readl_relaxed(cpr->base + RBIF_LIMIT));
+ pr_debug("RBCPR_STEP_QUOT: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_STEP_QUOT));
+ pr_debug("RBIF_SW_VLEVEL: 0x%x\n",
+ readl_relaxed(cpr->base + RBIF_SW_VLEVEL));
+ pr_debug("RBCPR_DEBUG1: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_DEBUG1));
+ pr_debug("RBCPR_RESULT_0: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_RESULT_0));
+ pr_debug("RBCPR_RESULT_1: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_RESULT_1));
+ pr_debug("RBCPR_QUOT_AVG: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_QUOT_AVG));
+ pr_debug("RBCPR_CTL: 0x%x\n",
+ readl_relaxed(cpr->base + RBCPR_CTL));
+ pr_debug("RBIF_IRQ_EN(0): 0x%x\n",
+ cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line)));
+ pr_debug("RBIF_IRQ_STATUS: 0x%x\n",
+ cpr_read_reg(cpr, RBIF_IRQ_STATUS));
+}
+#endif
+
+/* Enable the CPR H/W Block */
+static void cpr_enable(struct msm_cpr *cpr)
+{
+ mutex_lock(&cpr->cpr_mutex);
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
+ mutex_unlock(&cpr->cpr_mutex);
+}
+
+/* Disable the CPR H/W Block */
+static void cpr_disable(struct msm_cpr *cpr)
+{
+ mutex_lock(&cpr->cpr_mutex);
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
+ mutex_unlock(&cpr->cpr_mutex);
+}
+
+static int32_t cpr_poll_result(struct msm_cpr *cpr)
+{
+ uint32_t val = 0;
+ int8_t rc = 0;
+
+ rc = readl_poll_timeout(cpr->base + RBCPR_RESULT_0, val, ~val & BUSY_M,
+ 10, 1000);
+ if (rc)
+ pr_info("%s: RBCPR_RESULT_0 read error: %d\n",
+ __func__, rc);
+ return rc;
+}
+
+static int32_t cpr_poll_result_done(struct msm_cpr *cpr)
+{
+ uint32_t val = 0;
+ int8_t rc = 0;
+
+ rc = readl_poll_timeout(cpr->base + RBIF_IRQ_STATUS, val, val & 0x1,
+ 10, 1000);
+ if (rc)
+ pr_info("%s: RBCPR_IRQ_STATUS read error: %d\n",
+ __func__, rc);
+ return rc;
+}
+
+static void
+cpr_2pt_kv_analysis(struct msm_cpr *cpr, struct msm_cpr_mode *chip_data)
+{
+ int32_t tgt_volt_mV = 0, level_uV, rc;
+ uint32_t quot1, quot2;
+
+ /**
+ * 2 Point KV Analysis to calculate Step Quot
+ * STEP_QUOT is number of QUOT units per PMIC step
+ * STEP_QUOT = (quot1 - quot2) / 4
+ *
+ * The step quot is calculated once for every mode and stored for
+ * later use.
+ */
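+ /*
+ * Illustrative example (assumed numbers, not measured data): if the
+ * higher-voltage measurement returns quot1 = 960 and the lower-voltage
+ * measurement returns quot2 = 944, then
+ *   STEP_QUOT = (960 - 944) / 4 = 4
+ * i.e. roughly 4 QUOT units per PMIC voltage step.
+ */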
+ if (chip_data->step_quot != ~0)
+ goto out_2pt_kv;
+
+ /**
+ * Using the value from chip_data->tgt_volt_offset
+ * calculate the new PMIC adjusted voltages and set
+ * the PMIC to provide this value.
+ *
+ * Assuming default voltage is the highest value of safe boot up
+ * voltage, offset is always subtracted from it.
+ *
+ */
+ if (chip_data->tgt_volt_offset > 0) {
+ tgt_volt_mV = chip_data->calibrated_mV -
+ (chip_data->tgt_volt_offset * cpr->vp->step_size);
+ }
+ pr_debug("tgt_volt_mV = %d, calibrated_mV = %d", tgt_volt_mV,
+ chip_data->calibrated_mV);
+
+ /* level_uV = tgt_volt_mV * 1000; */
+ level_uV = 1350000;
+ /* Call the PMIC specific routine to set the voltage */
+ rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
+ if (rc) {
+ pr_err("%s: Initial voltage set at %duV failed. %d\n",
+ __func__, level_uV, rc);
+ return;
+ }
+ rc = regulator_enable(cpr->vreg_cx);
+ if (rc) {
+ pr_err("failed to enable %s, rc=%d\n", "vdd_cx", rc);
+ return;
+ }
+
+ /* Store the adjusted value of voltage */
+ chip_data->calibrated_mV = 1300;
+
+ /* Take first CPR measurement at a higher voltage to get QUOT1 */
+
+ /* Enable the Software mode of operation */
+ cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
+
+ /* Enable the cpr measurement */
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
+
+ /* IRQ is already disabled */
+ rc = cpr_poll_result_done(cpr);
+ if (rc) {
+ pr_err("%s: Quot1: Exiting due to INT_DONE poll timeout\n",
+ __func__);
+ return;
+ }
+
+ rc = cpr_poll_result(cpr);
+ if (rc) {
+ pr_err("%s: Quot1: Exiting due to BUSY poll timeout\n",
+ __func__);
+ return;
+ }
+
+ quot1 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
+
+ /* Take second CPR measurement at a lower voltage to get QUOT2 */
+ level_uV = 1300000;
+
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
+ /* Call the PMIC specific routine to set the voltage */
+ rc = regulator_set_voltage(cpr->vreg_cx, level_uV, level_uV);
+ if (rc) {
+ pr_err("%s: Voltage set at %duV failed. %d\n",
+ __func__, level_uV, rc);
+ return;
+ }
+
+ cpr_modify_reg(cpr, RBCPR_CTL, HW_TO_PMIC_EN_M, SW_MODE);
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
+
+ /* cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1); */
+ rc = cpr_poll_result_done(cpr);
+ if (rc) {
+ pr_err("%s: Quot2: Exiting due to INT_DONE poll timeout\n",
+ __func__);
+ goto err_poll_result_done;
+ }
+ /* IRQ is already disabled */
+ rc = cpr_poll_result(cpr);
+ if (rc) {
+ pr_err("%s: Quot2: Exiting due to BUSY poll timeout\n",
+ __func__);
+ goto err_poll_result;
+ }
+ quot2 = (cpr_read_reg(cpr, RBCPR_DEBUG1) & QUOT_SLOW_M) >> 12;
+ chip_data->step_quot = (quot1 - quot2) / 4;
+ pr_debug("%s: Calculated Step Quot is %d\n",
+ __func__, chip_data->step_quot);
+ /* Disable the cpr */
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, DISABLE_CPR);
+
+out_2pt_kv:
+ /* Program the step quot */
+ cpr_write_reg(cpr, RBCPR_STEP_QUOT, (chip_data->step_quot & 0xFF));
+ return;
+err_poll_result:
+err_poll_result_done:
+ regulator_disable(cpr->vreg_cx);
+}
+
+static inline
+void cpr_irq_clr_and_ack(struct msm_cpr *cpr, uint32_t mask)
+{
+ /* Clear the interrupt */
+ cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
+ /* Acknowledge the Recommendation */
+ cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
+}
+
+static inline
+void cpr_irq_clr_and_nack(struct msm_cpr *cpr, uint32_t mask)
+{
+ cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
+ cpr_write_reg(cpr, RBIF_CONT_NACK_CMD, 0x1);
+}
+
+static void cpr_irq_set(struct msm_cpr *cpr, uint32_t irq, bool enable)
+{
+ uint32_t irq_enabled;
+
+ irq_enabled = cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
+ if (enable == 1)
+ irq_enabled |= irq;
+ else
+ irq_enabled &= ~irq;
+ cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
+ INT_MASK, irq_enabled);
+}
+
+static void
+cpr_up_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
+{
+ int rc, set_volt_mV;
+ struct msm_cpr_mode *chip_data;
+
+ chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
+
+ /**
+ * FIXME: Need to handle a potential race condition between
+ * freq switch handler and CPR interrupt handler here
+ */
+ /* Set New PMIC voltage */
+ set_volt_mV = (new_volt < chip_data->Vmax ? new_volt
+ : chip_data->Vmax);
+ rc = regulator_set_voltage(cpr->vreg_cx, set_volt_mV * 1000,
+ set_volt_mV * 1000);
+ if (rc) {
+ pr_err("%s: Voltage set at %dmV failed. %d\n",
+ __func__, set_volt_mV, rc);
+ cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
+ return;
+ }
+ pr_debug("%s: Voltage set at %dmV\n", __func__, set_volt_mV);
+
+ /**
+ * Save the new calibrated voltage to be re-used
+ * whenever we return to same mode after a mode switch.
+ */
+ chip_data->calibrated_mV = set_volt_mV;
+
+ /* Clear all the interrupts */
+ cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
+
+ /* Disable Auto ACK for Down interrupts */
+ cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_NACK_DN_EN_M, 0);
+
+ /* Enable down interrupts to App as it might have got disabled if CPR
+ * hit Vmin earlier. Voltage set is above Vmin now.
+ */
+ cpr_irq_set(cpr, DOWN_INT, 1);
+
+ /* Acknowledge the Recommendation */
+ cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
+}
+
+static void
+cpr_dn_event_handler(struct msm_cpr *cpr, uint32_t new_volt)
+{
+ int rc, set_volt_mV;
+ struct msm_cpr_mode *chip_data;
+
+ chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
+
+ /**
+ * FIXME: Need to handle a potential race condition between
+ * freq switch handler and CPR interrupt handler here
+ */
+ /* Set New PMIC volt */
+ set_volt_mV = (new_volt > chip_data->Vmin ? new_volt
+ : chip_data->Vmin);
+ rc = regulator_set_voltage(cpr->vreg_cx, set_volt_mV * 1000,
+ set_volt_mV * 1000);
+ if (rc) {
+ pr_err("%s: Voltage at %dmV failed %d\n",
+ __func__, set_volt_mV, rc);
+ cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
+ return;
+ }
+ pr_debug("%s: Voltage set at %dmV\n", __func__, set_volt_mV);
+
+ /**
+ * Save the new calibrated voltage to be re-used
+ * whenever we return to same mode after a mode switch.
+ */
+ chip_data->calibrated_mV = set_volt_mV;
+
+ /* Clear all the interrupts */
+ cpr_write_reg(cpr, RBIF_IRQ_CLEAR, 0x3F);
+
+ if (new_volt <= chip_data->Vmin) {
+ /*
+ * Disable down interrupt to App after we hit Vmin
+ * It shall be enabled after we service an up interrupt
+ *
+ * A race condition between freq switch handler and CPR
+ * interrupt handler is possible. So, do not disable
+ * interrupt if a freq switch already caused a mode
+ * change since we need this interrupt in the new mode.
+ */
+ if (cpr->cpr_mode == cpr->prev_mode) {
+ /* Enable Auto ACK for CPR Down Flags
+ * while DOWN_INT to App is disabled */
+ cpr_modify_reg(cpr, RBCPR_CTL,
+ SW_AUTO_CONT_NACK_DN_EN_M,
+ SW_AUTO_CONT_NACK_DN_EN);
+ cpr_irq_set(cpr, DOWN_INT, 0);
+ pr_debug("%s: DOWN_INT disabled\n", __func__);
+ }
+ }
+ /* Acknowledge the Recommendation */
+ cpr_write_reg(cpr, RBIF_CONT_ACK_CMD, 0x1);
+}
+
+static void cpr_set_vdd(struct msm_cpr *cpr, enum cpr_action action)
+{
+ uint32_t curr_volt, new_volt, error_step;
+ struct msm_cpr_mode *chip_data;
+
+ chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
+ error_step = cpr_read_reg(cpr, RBCPR_RESULT_0) >> 2;
+ error_step &= 0xF;
+ curr_volt = chip_data->calibrated_mV;
+
+ if (action == UP) {
+ /**
+ * Using up margin in the comparison helps avoid having to
+ * change up threshold values in chip register.
+ */
+ if (error_step < (cpr->config->up_threshold +
+ cpr->config->up_margin)) {
+ /* FIXME: Avoid repeated dn interrupts if we are here */
+ pr_debug("UP_INT error step too small to set\n");
+ cpr_irq_clr_and_nack(cpr, BIT(4) | BIT(0));
+ return;
+ }
+
+ /* Calculate new PMIC voltage */
+ new_volt = curr_volt + (error_step * cpr->vp->step_size);
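+ /*
+ * Example with assumed values: for curr_volt = 1100 (mV),
+ * error_step = 2 and a PMIC step_size of 25 (mV),
+ * new_volt = 1100 + (2 * 25) = 1150 mV, which is later clamped
+ * to chip_data->Vmax in cpr_up_event_handler().
+ */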
+ pr_debug("UP_INT: new_volt: %d\n", new_volt);
+ cpr_up_event_handler(cpr, new_volt);
+
+ } else if (action == DOWN) {
+ /**
+ * Using down margin in the comparison helps avoid having to
+ * change down threshold values in chip register.
+ */
+ if (error_step < (cpr->config->dn_threshold +
+ cpr->config->dn_margin)) {
+ /* FIXME: Avoid repeated dn interrupts if we are here */
+ pr_debug("DOWN_INT error_step too small to set\n");
+ cpr_irq_clr_and_nack(cpr, BIT(2) | BIT(0));
+ return;
+ }
+
+ /* Calculate new PMIC voltage */
+ new_volt = curr_volt - (error_step * cpr->vp->step_size);
+ pr_debug("DOWN_INT: new_volt: %d\n", new_volt);
+ cpr_dn_event_handler(cpr, new_volt);
+ }
+}
+
+static irqreturn_t cpr_irq0_handler(int irq, void *dev_id)
+{
+ struct msm_cpr *cpr = dev_id;
+ uint32_t reg_val, ctl_reg;
+
+ reg_val = cpr_read_reg(cpr, RBIF_IRQ_STATUS);
+ ctl_reg = cpr_read_reg(cpr, RBCPR_CTL);
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (reg_val & BIT(4)) {
+ pr_debug(" CPR:IRQ %d occured for UP Flag\n", irq);
+ cpr_set_vdd(cpr, UP);
+
+ } else if ((reg_val & BIT(2)) && !(ctl_reg & SW_AUTO_CONT_NACK_DN_EN)) {
+ pr_debug(" CPR:IRQ %d occured for Down Flag\n", irq);
+ cpr_set_vdd(cpr, DOWN);
+
+ } else if (reg_val & BIT(1)) {
+ pr_debug(" CPR:IRQ %d occured for Min Flag\n", irq);
+ cpr_irq_clr_and_nack(cpr, BIT(1) | BIT(0));
+
+ } else if (reg_val & BIT(5)) {
+ pr_debug(" CPR:IRQ %d occured for MAX Flag\n", irq);
+ cpr_irq_clr_and_nack(cpr, BIT(5) | BIT(0));
+
+ } else if (reg_val & BIT(3)) {
+ /* SW_AUTO_CONT_ACK_EN is enabled */
+ pr_debug(" CPR:IRQ %d occured for Mid Flag\n", irq);
+ }
+ return IRQ_HANDLED;
+}
+
+static void cpr_config(struct msm_cpr *cpr)
+{
+ uint32_t delay_count, cnt = 0, rc, tmp_uV;
+ struct msm_cpr_mode *chip_data;
+
+ chip_data = &cpr->config->cpr_mode_data[cpr->cpr_mode];
+
+ /* Program the SW vlevel */
+ cpr_modify_reg(cpr, RBIF_SW_VLEVEL, SW_VLEVEL_M,
+ cpr->config->sw_vlevel);
+
+ /* Set the floor and ceiling values */
+ cpr->floor = cpr->config->floor;
+ cpr->ceiling = cpr->config->ceiling;
+
+ /* Program the Ceiling & Floor values */
+ cpr_modify_reg(cpr, RBIF_LIMIT, (CEILING_M | FLOOR_M),
+ ((cpr->ceiling << 6) | cpr->floor));
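+ /*
+ * Per the CEILING_M (0x00000FC0) and FLOOR_M (0x0000003F) masks in
+ * msm_cpr.h, the ceiling occupies RBIF_LIMIT bits [11:6] and the floor
+ * bits [5:0]; e.g. the CEILING_V/FLOOR_V test values 0x30 and 0x15
+ * would program 0x30 << 6 | 0x15 = 0xC15.
+ */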
+
+ /* Program the Up and Down Threshold values */
+ cpr_modify_reg(cpr, RBCPR_CTL, UP_THRESHOLD_M | DN_THRESHOLD_M,
+ cpr->config->up_threshold << 24 |
+ cpr->config->dn_threshold << 28);
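+ /*
+ * Per UP_THRESHOLD_M (0x0F000000) and DN_THRESHOLD_M (0xF0000000), the
+ * up threshold lands in RBCPR_CTL bits [27:24] and the down threshold
+ * in bits [31:28], matching the shifts above.
+ */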
+
+ cpr->curr_osc = chip_data->ring_osc;
+
+ /**
+ * Program the gate count and target values
+ * for all the ring oscillators
+ */
+ while (cnt < NUM_OSC) {
+ cpr_modify_reg(cpr, RBCPR_GCNT_TARGET(cnt),
+ (GCNT_M | TARGET_M),
+ (chip_data->ring_osc_data[cnt].gcnt << 12 |
+ chip_data->ring_osc_data[cnt].target_count));
+ pr_debug("RBCPR_GCNT_TARGET(%d): = 0x%x\n", cnt,
+ readl_relaxed(cpr->base + RBCPR_GCNT_TARGET(cnt)));
+ cnt++;
+ }
+
+ /* Configure the step quot */
+ cpr_2pt_kv_analysis(cpr, chip_data);
+
+ /**
+ * Call the PMIC specific routine to set the voltage
+ * Set with an extra step since it helps as per
+ * characterization data.
+ */
+ chip_data->calibrated_mV += cpr->vp->step_size;
+ tmp_uV = chip_data->calibrated_mV * 1000;
+ rc = regulator_set_voltage(cpr->vreg_cx, tmp_uV, tmp_uV);
+ if (rc)
+ pr_err("%s: Voltage set failed %d\n", __func__, rc);
+
+ /* Program the Timer for default delay between CPR measurements */
+ delay_count = 0xFFFF;
+ cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL, delay_count);
+
+ /* Enable the Timer */
+ cpr_modify_reg(cpr, RBCPR_CTL, TIMER_M, ENABLE_TIMER);
+
+ /* Enable Auto ACK for Mid interrupts */
+ cpr_modify_reg(cpr, RBCPR_CTL, SW_AUTO_CONT_ACK_EN_M,
+ SW_AUTO_CONT_ACK_EN);
+}
+
+static void cpr_mode_config(struct msm_cpr *cpr, enum cpr_mode mode)
+{
+ if (cpr->cpr_mode == mode)
+ return;
+
+ cpr->cpr_mode = mode;
+ pr_debug("%s: Switching to %s mode\n", __func__,
+ (mode == TURBO_MODE ? "TURBO" : "NORMAL"));
+
+ /* Configure the new mode */
+ cpr_config(cpr);
+}
+
+static int
+cpr_freq_transition(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct msm_cpr *cpr = container_of(nb, struct msm_cpr, freq_transition);
+ struct cpufreq_freqs *freqs = data;
+
+ switch (val) {
+ case CPUFREQ_PRECHANGE:
+ return 0;
+ pr_debug("pre freq change notification to cpr\n");
+
+ disable_irq(cpr->irq);
+ cpr_disable(cpr);
+ cpr->prev_mode = cpr->cpr_mode;
+ break;
+ case CPUFREQ_POSTCHANGE:
+ return 0;
+ pr_debug("post freq change notification to cpr\n");
+
+ if (freqs->new >= cpr->config->nom_freq_limit)
+ cpr_mode_config(cpr, TURBO_MODE);
+ else
+ cpr_mode_config(cpr, NORMAL_MODE);
+ /**
+ * Enable all interrupts. One of them could be in a disabled
+ * state if vdd had hit Vmax / Vmin earlier
+ */
+ cpr_irq_set(cpr, (UP_INT | DOWN_INT), 1);
+
+ enable_irq(cpr->irq);
+
+ cpr_enable(cpr);
+
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int msm_cpr_resume(struct device *dev)
+{
+ struct msm_cpr *cpr = dev_get_drvdata(dev);
+ int osc_num = cpr->config->cpr_mode_data->ring_osc;
+
+ cpr_write_reg(cpr, RBCPR_TIMER_INTERVAL,
+ cpr_save_state.rbif_timer_interval);
+ cpr_write_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
+ cpr_save_state.rbif_int_en);
+ cpr_write_reg(cpr, RBIF_LIMIT,
+ cpr_save_state.rbif_limit);
+ cpr_write_reg(cpr, RBIF_TIMER_ADJUST,
+ cpr_save_state.rbif_timer_adjust);
+ cpr_write_reg(cpr, RBCPR_GCNT_TARGET(osc_num),
+ cpr_save_state.rbcpr_gcnt_target);
+ cpr_write_reg(cpr, RBCPR_STEP_QUOT,
+ cpr_save_state.rbcpr_step_quot);
+ cpr_write_reg(cpr, RBIF_SW_VLEVEL,
+ cpr_save_state.rbif_sw_level);
+
+ cpr_enable(cpr);
+ cpr_write_reg(cpr, RBCPR_CTL,
+ cpr_save_state.rbcpr_ctl);
+ enable_irq(cpr->irq);
+
+ return 0;
+}
+
+static int msm_cpr_suspend(struct device *dev)
+{
+ struct msm_cpr *cpr = dev_get_drvdata(dev);
+ int osc_num = cpr->config->cpr_mode_data->ring_osc;
+
+ cpr_save_state.rbif_timer_interval =
+ cpr_read_reg(cpr, RBCPR_TIMER_INTERVAL);
+ cpr_save_state.rbif_int_en =
+ cpr_read_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line));
+ cpr_save_state.rbif_limit =
+ cpr_read_reg(cpr, RBIF_LIMIT);
+ cpr_save_state.rbif_timer_adjust =
+ cpr_read_reg(cpr, RBIF_TIMER_ADJUST);
+ cpr_save_state.rbcpr_gcnt_target =
+ cpr_read_reg(cpr, RBCPR_GCNT_TARGET(osc_num));
+ cpr_save_state.rbcpr_step_quot =
+ cpr_read_reg(cpr, RBCPR_STEP_QUOT);
+ cpr_save_state.rbif_sw_level =
+ cpr_read_reg(cpr, RBIF_SW_VLEVEL);
+ cpr_save_state.rbcpr_ctl =
+ cpr_read_reg(cpr, RBCPR_CTL);
+
+ disable_irq(cpr->irq);
+ cpr_disable(cpr);
+
+ return 0;
+}
+
+void msm_cpr_pm_resume(void)
+{
+ msm_cpr_resume(&cpr_pdev->dev);
+}
+EXPORT_SYMBOL(msm_cpr_pm_resume);
+
+void msm_cpr_pm_suspend(void)
+{
+ msm_cpr_suspend(&cpr_pdev->dev);
+}
+EXPORT_SYMBOL(msm_cpr_pm_suspend);
+#endif
+
+void msm_cpr_disable(void)
+{
+ struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);
+ cpr_disable(cpr);
+}
+EXPORT_SYMBOL(msm_cpr_disable);
+
+void msm_cpr_enable(void)
+{
+ struct msm_cpr *cpr = platform_get_drvdata(cpr_pdev);
+ cpr_enable(cpr);
+}
+EXPORT_SYMBOL(msm_cpr_enable);
+
+static int __devinit msm_cpr_probe(struct platform_device *pdev)
+{
+ int res, irqn, irq_enabled;
+ struct msm_cpr *cpr;
+ const struct msm_cpr_config *pdata = pdev->dev.platform_data;
+ void __iomem *base;
+ struct resource *mem;
+
+ if (!pdata) {
+ pr_err("CPR: Platform data is not available\n");
+ return -EIO;
+ }
+
+ cpr = devm_kzalloc(&pdev->dev, sizeof(struct msm_cpr), GFP_KERNEL);
+ if (!cpr)
+ return -ENOMEM;
+
+ /* Initialize platform_data */
+ cpr->config = pdata;
+
+ cpr_pdev = pdev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem || !mem->start) {
+ pr_err("CPR: get resource failed\n");
+ res = -ENXIO;
+ goto out;
+ }
+
+ base = ioremap_nocache(mem->start, resource_size(mem));
+ if (!base) {
+ pr_err("CPR: ioremap failed\n");
+ res = -ENOMEM;
+ goto out;
+ }
+
+ if (cpr->config->irq_line < 0) {
+ pr_err("CPR: Invalid IRQ line specified\n");
+ res = -ENXIO;
+ goto err_ioremap;
+ }
+ irqn = platform_get_irq(pdev, cpr->config->irq_line);
+ if (irqn < 0) {
+ pr_err("CPR: Unable to get irq\n");
+ res = -ENXIO;
+ goto err_ioremap;
+ }
+
+ cpr->irq = irqn;
+
+ cpr->base = base;
+
+ cpr->vp = pdata->vp_data;
+
+ mutex_init(&cpr->cpr_mutex);
+
+ /* Initialize the Voltage domain for CPR */
+ cpr->vreg_cx = regulator_get(&pdev->dev, "vddx_cx");
+ if (IS_ERR(cpr->vreg_cx)) {
+ res = PTR_ERR(cpr->vreg_cx);
+ pr_err("could not get regulator: %d\n", res);
+ goto err_reg_get;
+ }
+
+ /* Assume current mode is TURBO Mode */
+ cpr->cpr_mode = TURBO_MODE;
+ cpr->prev_mode = TURBO_MODE;
+
+ /* Initial configuration of CPR */
+ cpr_config(cpr);
+
+ platform_set_drvdata(pdev, cpr);
+
+ /* Initialize the Debugfs Entry for cpr */
+ res = msm_cpr_debug_init(cpr->base);
+ if (res) {
+ pr_err("CPR: Debugfs Creation Failed\n");
+ goto err_ioremap;
+ }
+
+ /* Register the interrupt handler for IRQ 0 */
+ res = request_threaded_irq(irqn, NULL, cpr_irq0_handler,
+ IRQF_TRIGGER_RISING, "msm-cpr-irq0", cpr);
+ if (res) {
+ pr_err("CPR: request irq failed for IRQ %d\n", irqn);
+ goto err_ioremap;
+ }
+
+ /**
+ * Enable the requested interrupt lines.
+ * Do not enable MID_INT since we shall use
+ * SW_AUTO_CONT_ACK_EN bit.
+ */
+ irq_enabled = INT_MASK & ~MID_INT;
+ cpr_modify_reg(cpr, RBIF_IRQ_EN(cpr->config->irq_line),
+ INT_MASK, irq_enabled);
+
+ /* Enable the cpr */
+ cpr_modify_reg(cpr, RBCPR_CTL, LOOP_EN_M, ENABLE_CPR);
+
+ cpr->freq_transition.notifier_call = cpr_freq_transition;
+ cpufreq_register_notifier(&cpr->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return res;
+
+err_reg_get:
+err_ioremap:
+ iounmap(base);
+out:
+ return res;
+}
+
+static int __devexit msm_cpr_remove(struct platform_device *pdev)
+{
+ struct msm_cpr *cpr = platform_get_drvdata(pdev);
+
+ cpufreq_unregister_notifier(&cpr->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ regulator_disable(cpr->vreg_cx);
+ regulator_put(cpr->vreg_cx);
+ free_irq(cpr->irq, cpr);
+ iounmap(cpr->base);
+ mutex_destroy(&cpr->cpr_mutex);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct dev_pm_ops msm_cpr_dev_pm_ops = {
+ .suspend = msm_cpr_suspend,
+ .resume = msm_cpr_resume,
+};
+
+static struct platform_driver msm_cpr_driver = {
+ .probe = msm_cpr_probe,
+ .remove = __devexit_p(msm_cpr_remove),
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &msm_cpr_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init msm_init_cpr(void)
+{
+ return platform_driver_register(&msm_cpr_driver);
+}
+
+module_init(msm_init_cpr);
+
+static void __exit msm_exit_cpr(void)
+{
+ platform_driver_unregister(&msm_cpr_driver);
+}
+
+module_exit(msm_exit_cpr);
+
+MODULE_DESCRIPTION("MSM CPR Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/msm_cpr.h b/arch/arm/mach-msm/msm_cpr.h
new file mode 100644
index 0000000..2642b9c
--- /dev/null
+++ b/arch/arm/mach-msm/msm_cpr.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CPR_H
+#define __ARCH_ARM_MACH_MSM_CPR_H
+
+/* Register Offsets for RBCPR */
+
+/* RBCPR Gate Count and Target Registers */
+#define RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+/* RBCPR Timer Control */
+#define RBCPR_TIMER_INTERVAL 0x44
+#define RBIF_TIMER_ADJUST 0x4C
+
+/* RBCPR Config Register */
+#define RBIF_LIMIT 0x48
+#define RBCPR_STEP_QUOT 0x80
+#define RBCPR_CTL 0x90
+#define RBIF_SW_VLEVEL 0x94
+#define RBIF_CONT_ACK_CMD 0x98
+#define RBIF_CONT_NACK_CMD 0x9C
+
+/* RBCPR Result status Register */
+#define RBCPR_RESULT_0 0xA0
+#define RBCPR_RESULT_1 0xA4
+#define RBCPR_QUOT_AVG 0x118
+
+/* RBCPR DEBUG Register */
+#define RBCPR_DEBUG1 0x120
+
+/* RBCPR Interrupt Control Register */
+#define RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define RBIF_IRQ_CLEAR 0x110
+#define RBIF_IRQ_STATUS 0x114
+
+/* Bit Mask Values */
+#define GCNT_M 0x003FF000
+#define TARGET_M 0x00000FFF
+#define SW_VLEVEL_M 0x0000003F
+#define UP_FLAG_M 0x00000010
+#define DOWN_FLAG_M 0x00000004
+#define CEILING_M 0x00000FC0
+#define FLOOR_M 0x0000003F
+#define LOOP_EN_M 0x00000001
+#define TIMER_M 0x00000008
+#define SW_AUTO_CONT_ACK_EN_M 0x00000020
+#define SW_AUTO_CONT_NACK_DN_EN_M 0x00000040
+#define HW_TO_PMIC_EN_M BIT(4)
+#define BUSY_M BIT(19)
+#define QUOT_SLOW_M 0x00FFF000
+#define UP_THRESHOLD_M 0x0F000000
+#define DN_THRESHOLD_M 0xF0000000
+
+/* Bit Values */
+#define ENABLE_CPR BIT(0)
+#define DISABLE_CPR 0x0
+#define ENABLE_TIMER BIT(3)
+#define DISABLE_TIMER 0x0
+#define SW_MODE 0x0
+#define SW_AUTO_CONT_ACK_EN BIT(5)
+#define SW_AUTO_CONT_NACK_DN_EN BIT(6)
+
+/* Test values for RBCPR RUMI Testing */
+#define GNT_CNT 0xC0
+#define TARGET 0xEFF
+
+#define CEILING_V 0x30
+#define FLOOR_V 0x15
+
+#define SW_LEVEL 0x20
+
+/* Interrupt Mask for All interrupt flags */
+#define INT_MASK (MIN_INT | DOWN_INT | MID_INT | UP_INT | MAX_INT)
+
+/* Number of oscillators in each sensor */
+#define NUM_OSC 8
+
+#define CPR_MODE 2
+
+/**
+ * enum cpr_mode - Modes in which cpr is used
+ */
+enum cpr_mode {
+ NORMAL_MODE = 0,
+ TURBO_MODE,
+ SVS_MODE,
+};
+
+/**
+ * enum cpr_action - Cpr actions to be taken
+ */
+enum cpr_action {
+ DOWN = 0,
+ UP,
+};
+
+/**
+ * enum cpr_interrupt
+ */
+enum cpr_interrupt {
+ DONE_INT = BIT(0),
+ MIN_INT = BIT(1),
+ DOWN_INT = BIT(2),
+ MID_INT = BIT(3),
+ UP_INT = BIT(4),
+ MAX_INT = BIT(5),
+};
+
+/**
+ * struct msm_cpr_vp_data - structure for VP configuration
+ * @min_volt: minimum voltage level for VP, in mV
+ * @max_volt: maximum voltage level for VP, in mV
+ * @default_volt: default voltage for VP, in mV
+ * @step_size: voltage step size, in mV
+ */
+struct msm_cpr_vp_data {
+ int min_volt;
+ int max_volt;
+ int default_volt;
+ int step_size;
+};
+
+/**
+ * struct msm_cpr_osc - Data for CPR ring oscillator
+ * @gcnt: gate count value for the oscillator
+ * @target_count: target value for ring oscillator
+ */
+struct msm_cpr_osc {
+ int gcnt;
+ uint32_t target_count;
+};
+
+/**
+ * struct msm_cpr_mode - Data for CPR modes of operation
+ * @ring_osc_data: per-oscillator data for the sensor
+ * @ring_osc: ring oscillator of the sensor
+ * @tgt_volt_offset: initial voltage offset from the default value
+ * @step_quot: step quot for CPR calculation
+ */
+struct msm_cpr_mode {
+ struct msm_cpr_osc ring_osc_data[NUM_OSC];
+ int ring_osc;
+ int32_t tgt_volt_offset;
+ uint32_t step_quot;
+ uint32_t Vmax;
+ uint32_t Vmin;
+ uint32_t calibrated_mV;
+};
+
+/**
+ * struct msm_cpr_config - Platform data for CPR configuration
+ * @ref_clk_khz: CPR reference clock, in kHz
+ * @delay_us: timer delay, in microseconds
+ * @irq_line: irq line to be used (0, 1, or 2)
+ * @cpr_mode_data: per-mode CPR configuration data
+ */
+struct msm_cpr_config {
+ unsigned long ref_clk_khz;
+ unsigned long delay_us;
+ int irq_line;
+ struct msm_cpr_mode *cpr_mode_data;
+ int min_down_step;
+ uint32_t tgt_count_div_N; /* Target Cnt(Nom) = Target Cnt(Turbo) / N */
+ uint32_t floor;
+ uint32_t ceiling;
+ uint32_t sw_vlevel;
+ uint32_t up_threshold;
+ uint32_t dn_threshold;
+ uint32_t up_margin;
+ uint32_t dn_margin;
+ uint32_t nom_freq_limit;
+ struct msm_cpr_vp_data *vp_data;
+};
+
+/**
+ * struct msm_cpr_reg - CPR registers saved across suspend/resume
+ */
+struct msm_cpr_reg {
+ uint32_t rbif_timer_interval;
+ uint32_t rbif_int_en;
+ uint32_t rbif_limit;
+ uint32_t rbif_timer_adjust;
+ uint32_t rbcpr_gcnt_target;
+ uint32_t rbcpr_step_quot;
+ uint32_t rbif_sw_level;
+ uint32_t rbcpr_ctl;
+};
+
+#if defined(CONFIG_MSM_CPR) || defined(CONFIG_MSM_CPR_MODULE)
+/* msm_cpr_pm_resume: Used by Power Manager for Idle Power Collapse */
+void msm_cpr_pm_resume(void);
+/* msm_cpr_pm_suspend: Used by Power Manager for Idle Power Collapse */
+void msm_cpr_pm_suspend(void);
+/* msm_cpr_enable: Used by Power Manager for GDFS */
+void msm_cpr_enable(void);
+/* msm_cpr_disable: Used by Power Manager for GDFS */
+void msm_cpr_disable(void);
+#else
+/* msm_cpr_pm_resume: Used by Power Manager for Idle Power Collapse */
+static inline void msm_cpr_pm_resume(void) { }
+/* msm_cpr_pm_suspend: Used by Power Manager for Idle Power Collapse */
+static inline void msm_cpr_pm_suspend(void) { }
+/* msm_cpr_enable: Used by Power Manager for GDFS */
+static inline void msm_cpr_enable(void) { }
+/* msm_cpr_disable: Used by Power Manager for GDFS */
+static inline void msm_cpr_disable(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+int msm_cpr_debug_init(void *);
+#else
+static inline int msm_cpr_debug_init(void *data) { return 0; }
+#endif
+#endif /* __ARCH_ARM_MACH_MSM_CPR_H */
diff --git a/arch/arm/mach-msm/msm_vp.c b/arch/arm/mach-msm/msm_vp.c
new file mode 100644
index 0000000..2569474
--- /dev/null
+++ b/arch/arm/mach-msm/msm_vp.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <mach/msm_iomap.h>
+
+/* Address for Perf Level Register */
+#define VDD_APC_PLEVEL_BASE (MSM_CLK_CTL_BASE + 0x0298)
+#define VDD_APC_PLEVEL(n) (VDD_APC_PLEVEL_BASE + 4 * (n))
+
+/* Address for SYS_P_Level register */
+#define VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
+
+#define MV_TO_UV(mv) ((mv)*1000)
+#define UV_TO_MV(uv) (((uv)+999)/1000)
+
+#define MSM_VP_REGULATOR_DEV_NAME "vp-regulator"
+
+/**
+ * Convert Voltage to PLEVEL register value
+ * Here x is the required voltage in millivolts,
+ * e.g. if Required voltage is 1200mV then
+ * required value to be programmed into the
+ * Plevel register is 0x32. This equation is
+ * based on H/W logic being used in SVS controller.
+ *
+ * Here we are taking the minimum voltage step
+ * to be 12.5mV as per H/W logic and adding 0x20
+ * is for selecting the reference voltage.
+ * 750mV is minimum voltage of MSMC2 smps.
+ */
+#define VOLT_TO_BIT(x) ((((x) - 750) / (12500/1000)) + 0x20)
+#define VREG_VREF_SEL (1 << 5)
+#define VREG_PD_EN (1 << 6)
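+
+/*
+ * Worked example (illustrative only, following VOLT_TO_BIT() exactly as
+ * written): the integer step (12500/1000) evaluates to 12, so a request of
+ * 1100mV gives (1100 - 750) / 12 = 29 (0x1D), and adding 0x20 for the
+ * reference select yields 0x3D.
+ */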
+
+/**
+ * struct msm_vp - Structure for VP
+ * @dev: device pointer
+ * @rdev: regulator device
+ * @current_voltage: current voltage value in mV
+ */
+struct msm_vp {
+ struct device *dev;
+ struct regulator_dev *rdev;
+ int current_voltage;
+};
+
+/* Function to change the Vdd Level */
+static int vp_reg_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *sel)
+{
+ struct msm_vp *vp = rdev_get_drvdata(rdev);
+ uint32_t reg_val, perf_level, plevel, cur_plevel, fine_step_volt;
+
+ reg_val = readl_relaxed(VDD_SVS_PLEVEL_ADDR);
+ perf_level = reg_val & 0x07;
+
+ plevel = (min_uV - 750000) / 25000;
+ fine_step_volt = (min_uV - 750000) % 25000;
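+	/*
+	 * Illustrative only: for min_uV = 1112500, plevel = (1112500 -
+	 * 750000) / 25000 = 14 and fine_step_volt = 12500, so VREG_PD_EN
+	 * is set below to cover the extra 12.5mV step.
+	 */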
+
+	/*
+	 * Program the new voltage level for the current perf_level
+	 * in the corresponding PLEVEL register.
+	 */
+ cur_plevel = readl_relaxed(VDD_APC_PLEVEL(perf_level));
+ /* clear lower 6 bits */
+ cur_plevel &= ~0x3F;
+ cur_plevel |= (plevel | VREG_VREF_SEL);
+ if (fine_step_volt >= 12500)
+ cur_plevel |= VREG_PD_EN;
+ writel_relaxed(cur_plevel, VDD_APC_PLEVEL(perf_level));
+
+ /* Clear the current perf level */
+ reg_val &= 0xF8;
+ writel_relaxed(reg_val, VDD_SVS_PLEVEL_ADDR);
+
+ /* Initiate the PMIC SSBI request to change the voltage */
+ reg_val |= (BIT(7) | perf_level << 3);
+ writel_relaxed(reg_val, VDD_SVS_PLEVEL_ADDR);
+ mb();
+ udelay(62);
+
+ if ((readl_relaxed(VDD_SVS_PLEVEL_ADDR) & 0x07) != perf_level) {
+ pr_err("Vdd Set Failed\n");
+ return -EIO;
+ }
+
+ vp->current_voltage = (min_uV / 1000);
+ return 0;
+}
+
+static int vp_reg_get_voltage(struct regulator_dev *rdev)
+{
+ struct msm_vp *vp = rdev_get_drvdata(rdev);
+
+ return MV_TO_UV(vp->current_voltage);
+}
+
+static int vp_reg_enable(struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static int vp_reg_disable(struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+/* Regulator registration specific data */
+/* FIXME: should move to board-xx-regulator.c file */
+static struct regulator_consumer_supply vp_consumer =
+ REGULATOR_SUPPLY("vddx_cx", "msm-cpr");
+
+static struct regulator_init_data vp_reg_data = {
+ .constraints = {
+ .name = "vddx_c2",
+ .min_uV = 750000,
+ .max_uV = 1500000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .boot_on = 1,
+ .input_uV = 0,
+ .always_on = 1,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &vp_consumer,
+};
+
+/* Regulator specific ops */
+static struct regulator_ops vp_reg_ops = {
+ .enable = vp_reg_enable,
+ .disable = vp_reg_disable,
+ .get_voltage = vp_reg_get_voltage,
+ .set_voltage = vp_reg_set_voltage,
+};
+
+/* Regulator Description */
+static struct regulator_desc vp_reg = {
+ .name = "vddcx",
+ .id = -1,
+ .ops = &vp_reg_ops,
+ .type = REGULATOR_VOLTAGE,
+};
+
+static int __devinit msm_vp_reg_probe(struct platform_device *pdev)
+{
+ struct msm_vp *vp;
+ int rc;
+
+ vp = kzalloc(sizeof(struct msm_vp), GFP_KERNEL);
+ if (!vp) {
+ pr_err("Could not allocate memory for VP\n");
+ return -ENOMEM;
+ }
+
+ vp->rdev = regulator_register(&vp_reg, NULL, &vp_reg_data, vp, NULL);
+ if (IS_ERR(vp->rdev)) {
+ rc = PTR_ERR(vp->rdev);
+ pr_err("Failed to register regulator: %d\n", rc);
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, vp);
+
+ return 0;
+error:
+ kfree(vp);
+ return rc;
+}
+
+static int __devexit msm_vp_reg_remove(struct platform_device *pdev)
+{
+ struct msm_vp *vp = platform_get_drvdata(pdev);
+
+ regulator_unregister(vp->rdev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(vp);
+
+ return 0;
+}
+
+static struct platform_driver msm_vp_reg_driver = {
+ .probe = msm_vp_reg_probe,
+ .remove = __devexit_p(msm_vp_reg_remove),
+ .driver = {
+ .name = MSM_VP_REGULATOR_DEV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
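+
+/*
+ * Illustrative only: this driver binds by name, so a board file is expected
+ * to register a matching platform device, e.g.
+ *
+ *	static struct platform_device msm_vp_device = {
+ *		.name	= MSM_VP_REGULATOR_DEV_NAME,
+ *		.id	= -1,
+ *	};
+ *	platform_device_register(&msm_vp_device);
+ */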
+
+static int __init msm_vp_reg_init(void)
+{
+ return platform_driver_register(&msm_vp_reg_driver);
+}
+postcore_initcall(msm_vp_reg_init);
+
+static void __exit msm_vp_reg_exit(void)
+{
+ platform_driver_unregister(&msm_vp_reg_driver);
+}
+module_exit(msm_vp_reg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM VP regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" MSM_VP_REGULATOR_DEV_NAME);
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
index 6511c95..7bfb208 100644
--- a/drivers/gpio/qpnp-pin.c
+++ b/drivers/gpio/qpnp-pin.c
@@ -31,6 +31,10 @@
((q_spec)->offset + reg_index)
#define Q_REG_STATUS1 0x8
+#define Q_REG_STATUS1_VAL_MASK 0x1
+#define Q_REG_STATUS1_GPIO_EN_MASK 0x2
+#define Q_REG_STATUS1_MPP_EN_MASK 0x80
+
#define Q_NUM_CTL_REGS 0xD
/* type registers base address offsets */
@@ -418,39 +422,41 @@
}
static int qpnp_pin_read_regs(struct qpnp_pin_chip *q_chip,
- struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+ struct qpnp_pin_spec *q_spec)
{
int bytes_left = q_spec->num_ctl_regs;
int rc;
- char *reg_p = &q_spec->regs[0];
+ char *buf_p = &q_spec->regs[0];
+ u16 reg_addr = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
while (bytes_left > 0) {
rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
- reg_p, bytes_left < 8 ? bytes_left : 8);
+ reg_addr, buf_p, bytes_left < 8 ? bytes_left : 8);
if (rc)
return rc;
bytes_left -= 8;
- reg_p += 8;
+ buf_p += 8;
+ reg_addr += 8;
}
return 0;
}
static int qpnp_pin_write_regs(struct qpnp_pin_chip *q_chip,
- struct qpnp_pin_spec *q_spec, u16 addr, u8 *buf)
+ struct qpnp_pin_spec *q_spec)
{
int bytes_left = q_spec->num_ctl_regs;
int rc;
- char *reg_p = &q_spec->regs[0];
+ char *buf_p = &q_spec->regs[0];
+ u16 reg_addr = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
while (bytes_left > 0) {
rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
- reg_p, bytes_left < 8 ? bytes_left : 8);
+ reg_addr, buf_p, bytes_left < 8 ? bytes_left : 8);
if (rc)
return rc;
bytes_left -= 8;
- reg_p += 8;
+ buf_p += 8;
+ reg_addr += 8;
}
return 0;
}
@@ -461,9 +467,7 @@
int rc;
struct device *dev = &q_chip->spmi->dev;
- rc = qpnp_pin_read_regs(q_chip, q_spec,
- Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
- &q_spec->regs[Q_REG_I_MODE_CTL]);
+ rc = qpnp_pin_read_regs(q_chip, q_spec);
if (rc)
dev_err(dev, "%s: unable to read control regs\n", __func__);
@@ -536,9 +540,7 @@
Q_REG_CS_OUT_SHIFT, Q_REG_CS_OUT_MASK,
param->cs_out);
- rc = qpnp_pin_write_regs(q_chip, q_spec,
- Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
- &q_spec->regs[Q_REG_I_MODE_CTL]);
+ rc = qpnp_pin_write_regs(q_chip, q_spec);
if (rc) {
dev_err(&q_chip->spmi->dev, "%s: unable to write master enable\n",
__func__);
@@ -628,7 +630,7 @@
int rc, ret_val;
struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
struct qpnp_pin_spec *q_spec = NULL;
- u8 buf[1];
+ u8 buf[1], en_mask;
if (WARN_ON(!q_chip))
return -ENODEV;
@@ -640,11 +642,17 @@
/* gpio val is from RT status iff input is enabled */
if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
== QPNP_PIN_MODE_DIG_IN) {
- /* INT_RT_STS */
rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
Q_REG_ADDR(q_spec, Q_REG_STATUS1),
&buf[0], 1);
- return buf[0];
+
+ en_mask = q_spec->type == Q_GPIO_TYPE ?
+ Q_REG_STATUS1_GPIO_EN_MASK :
+ Q_REG_STATUS1_MPP_EN_MASK;
+ if (!(buf[0] & en_mask))
+ return -EPERM;
+
+ return buf[0] & Q_REG_STATUS1_VAL_MASK;
} else {
ret_val = (q_spec->regs[Q_REG_I_MODE_CTL] &
@@ -671,7 +679,7 @@
Q_REG_OUT_INVERT_SHIFT, Q_REG_OUT_INVERT_MASK, 0);
rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
&q_spec->regs[Q_REG_I_MODE_CTL], 1);
if (rc)
dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
@@ -715,7 +723,7 @@
mode);
rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
- Q_REG_ADDR(q_spec, Q_REG_I_MODE_CTL),
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
&q_spec->regs[Q_REG_I_MODE_CTL], 1);
return rc;
}
@@ -799,7 +807,7 @@
Q_REG_OUT_TYPE_SHIFT,
Q_REG_OUT_TYPE_MASK);
param.invert = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
- Q_REG_OUT_INVERT_MASK,
+ Q_REG_OUT_INVERT_SHIFT,
Q_REG_OUT_INVERT_MASK);
param.pull = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
diff --git a/drivers/media/video/msm_vidc/msm_venc.c b/drivers/media/video/msm_vidc/msm_venc.c
index 10c7321..2e51daa 100644
--- a/drivers/media/video/msm_vidc/msm_venc.c
+++ b/drivers/media/video/msm_vidc/msm_venc.c
@@ -22,10 +22,10 @@
#define DEFAULT_WIDTH 1280
#define MIN_NUM_OUTPUT_BUFFERS 2
#define MAX_NUM_OUTPUT_BUFFERS 8
-#define MIN_BIT_RATE 64
-#define MAX_BIT_RATE 160000
-#define DEFAULT_BIT_RATE 64
-#define BIT_RATE_STEP 1
+#define MIN_BIT_RATE 64000
+#define MAX_BIT_RATE 160000000
+#define DEFAULT_BIT_RATE 64000
+#define BIT_RATE_STEP 100
#define MIN_FRAME_RATE 65536
#define MAX_FRAME_RATE 15728640
#define DEFAULT_FRAME_RATE 1966080
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 16a3ecd..3a924cb 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -1201,9 +1201,11 @@
}
case HAL_PARAM_VENC_RATE_CONTROL:
{
+ u32 *rc_mode;
pkt->rg_property_data[0] =
HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
- switch ((enum hal_rate_control)pdata) {
+ rc_mode = (u32 *)pdata;
+ switch ((enum hal_rate_control) *rc_mode) {
case HAL_RATE_CONTROL_OFF:
pkt->rg_property_data[1] = HFI_RATE_CONTROL_OFF;
break;