Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 68f3671..1b1c6e6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -535,6 +535,124 @@
If your platform uses a different flash partition label for storing
crashdumps, enter it here.
+config TSIF
+ depends on ARCH_MSM
+ tristate "TSIF (Transport Stream InterFace) support"
+ default n
+ ---help---
+ This driver supports low level TSIF interface. It provides API
+ for upper layer drivers. If you have a TSIF hardware, say
+ Y here and read <file:Documentation/arm/msm/tsif.txt>.
+
+ To compile this driver as module, choose M here: the
+ module will be called msm_tsif.
+
+config TSIF_CHRDEV
+ tristate "TSIF character device"
+ depends on TSIF
+ default n
+ ---help---
+ This driver uses low level TSIF interface. It provides character
+ device usable from user space programs: one can read TSIF stream
+ from this device.
+
+ This driver may be used as example for TSIF API usage.
+
+ To compile this driver as module, choose M here: the
+ module will be called tsif_chrdev.
+
+config TSIF_DEBUG
+ bool "Turn on debugging information for tsif driver"
+ depends on TSIF
+ default n
+ ---help---
+ This turns on debugging information for the tsif driver
+
+config HAPTIC_ISA1200
+ tristate "ISA1200 haptic support"
+ depends on I2C
+ default n
+ help
+ The ISA1200 is a high performance enhanced haptic driver.
+
+config PMIC8058_PWM
+ tristate "Qualcomm PM8058 PWM support"
+ depends on PMIC8058
+ default y
+ help
+ This option enables device driver support for the PWM channels
+ on Qualcomm PM8058 chip. Pulse Width Modulation is used for
+ purposes including software controlled brightness of backlight,
+ motor control, and waveform generation.
+
+config PMIC8058_VIBRATOR
+ tristate "Qualcomm PM8058 vibrator support"
+ depends on PMIC8058 && ANDROID_TIMED_OUTPUT
+ default n
+ help
+ This option enables device driver support for the vibrator
+ on Qualcomm PM8058 chip.
+
+config PMIC8058_NFC
+ tristate "Qualcomm PM8058 support for Near Field Communication"
+ depends on PMIC8058
+ default y
+ help
+ Qualcomm PM8058 chip has a module to support NFC (Near Field
+ Communication). This option enables the driver to support it.
+
+config PMIC8058_UPL
+ tristate "Qualcomm PM8058 support for User Programmable Logic"
+ depends on PMIC8058
+ default n
+ help
+ This option enables device driver support for User Programmable Logic
+ on Qualcomm PM8058 chip. The UPL module provides a means to implement
+ simple truth table based logic via a set of control registers. I/O may
+ be routed in and out of the UPL module via GPIO or DTEST pins.
+
+config PMIC8058_XOADC
+ tristate "Qualcomm PM8058 XOADC driver"
+ depends on PMIC8058
+ default n
+ help
+ Enables User processor ADC reads over the XOADC module of Qualcomm's
+ PMIC8058. Driver interface to program registers of the ADC over
+ AMUX channels, devices on programmable MPP's and xotherm.
+
+config PMIC8058_MISC
+ tristate "Qualcomm PM8058 Misc Device driver"
+ depends on PMIC8058
+ default n
+ help
+ Provides functionality for various small drivers utilizing the
+ Qualcomm PM8058 chip. Examples include: signalling when the 32kHz
+ oscillator malfunctions.
+
+config PMIC8058_BATTALARM
+ tristate "Qualcomm PM8058 Battery Alarm Device driver"
+ depends on PMIC8058
+ help
+ This option enables support for the battery alarm module on the
+ Qualcomm PM8058 PMIC chip. This support allows for configuration of
+ the alarm module as well as interrupt handling.
+
+config TZCOM
+ tristate "Trustzone Communicator driver"
+ default n
+ help
+ Provides a communication interface between userspace and
+ TrustZone Operating Environment (TZBSP) using Secure Channel
+ Manager (SCM) interface.
+
+config QFP_FUSE
+ tristate "QFPROM Fuse Read/Write support"
+ help
+ This option enables device driver to read/write QFPROM
+ fuses. The ioctls provides the necessary interface
+ to the fuse block. Currently this is supported only
+ on FSM targets.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2d43048..1795ecf 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -52,3 +52,17 @@
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_APANIC) += apanic.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
+obj-$(CONFIG_TSIF) += msm_tsif.o
+msm_tsif-objs := tsif.o
+obj-$(CONFIG_TSIF_CHRDEV) += tsif_chrdev.o
+obj-$(CONFIG_HAPTIC_ISA1200) += isa1200.o
+obj-$(CONFIG_PMIC8058_PWM) += pmic8058-pwm.o
+obj-$(CONFIG_PMIC8058_VIBRATOR) += pmic8058-vibrator.o
+obj-$(CONFIG_PMIC8058_NFC) += pmic8058-nfc.o
+obj-$(CONFIG_PMIC8058_UPL) += pmic8058-upl.o
+obj-$(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN) \
+ += msm_migrate_pages.o
+obj-$(CONFIG_PMIC8058_XOADC) += pmic8058-xoadc.o
+obj-$(CONFIG_PMIC8058_MISC) += pmic8058-misc.o
+obj-$(CONFIG_PMIC8058_BATTALARM) += pmic8058-batt-alarm.o
+obj-$(CONFIG_TZCOM) += tzcom.o
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
new file mode 100644
index 0000000..bb3f9a8
--- /dev/null
+++ b/drivers/misc/isa1200.c
@@ -0,0 +1,440 @@
+/*
+ * isa1200.c - Haptic Motor
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/i2c/isa1200.h>
+#include "../staging/android/timed_output.h"
+
+#define ISA1200_HCTRL0 0x30
+#define ISA1200_HCTRL1 0x31
+#define ISA1200_HCTRL5 0x35
+
+#define ISA1200_HCTRL0_RESET 0x01
+#define ISA1200_HCTRL1_RESET 0x4B
+
+#define ISA1200_HCTRL5_VIB_STRT 0xD5
+#define ISA1200_HCTRL5_VIB_STOP 0x6B
+
+struct isa1200_chip {
+ struct i2c_client *client;
+ struct isa1200_platform_data *pdata;
+ struct pwm_device *pwm;
+ struct hrtimer timer;
+ struct timed_output_dev dev;
+ struct work_struct work;
+ spinlock_t lock;
+ unsigned int enable;
+ unsigned int period_ns;
+};
+
+static int isa1200_read_reg(struct i2c_client *client, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int isa1200_write_reg(struct i2c_client *client, int reg, u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void isa1200_vib_set(struct isa1200_chip *haptic, int enable)
+{
+ int rc = 0;
+
+ if (enable) {
+ if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+ int period_us = haptic->period_ns / 1000;
+ rc = pwm_config(haptic->pwm,
+ (period_us * haptic->pdata->duty) / 100,
+ period_us);
+ if (rc < 0)
+ pr_err("%s: pwm_config fail\n", __func__);
+ rc = pwm_enable(haptic->pwm);
+ if (rc < 0)
+ pr_err("%s: pwm_enable fail\n", __func__);
+ } else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+ rc = isa1200_write_reg(haptic->client,
+ ISA1200_HCTRL5,
+ ISA1200_HCTRL5_VIB_STRT);
+ if (rc < 0)
+ pr_err("%s: start vibration fail\n", __func__);
+ }
+ } else {
+ if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+ pwm_disable(haptic->pwm);
+ else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+ rc = isa1200_write_reg(haptic->client,
+ ISA1200_HCTRL5,
+ ISA1200_HCTRL5_VIB_STOP);
+ if (rc < 0)
+ pr_err("%s: stop vibration fail\n", __func__);
+ }
+ }
+}
+
+static void isa1200_chip_work(struct work_struct *work)
+{
+ struct isa1200_chip *haptic;
+
+ haptic = container_of(work, struct isa1200_chip, work);
+ isa1200_vib_set(haptic, haptic->enable);
+}
+
+static void isa1200_chip_enable(struct timed_output_dev *dev, int value)
+{
+ struct isa1200_chip *haptic = container_of(dev, struct isa1200_chip,
+ dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&haptic->lock, flags);
+ hrtimer_cancel(&haptic->timer);
+ if (value == 0)
+ haptic->enable = 0;
+ else {
+ value = (value > haptic->pdata->max_timeout ?
+ haptic->pdata->max_timeout : value);
+ haptic->enable = 1;
+ hrtimer_start(&haptic->timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_irqrestore(&haptic->lock, flags);
+ schedule_work(&haptic->work);
+}
+
+static int isa1200_chip_get_time(struct timed_output_dev *dev)
+{
+ struct isa1200_chip *haptic = container_of(dev, struct isa1200_chip,
+ dev);
+
+ if (hrtimer_active(&haptic->timer)) {
+ ktime_t r = hrtimer_get_remaining(&haptic->timer);
+ struct timeval t = ktime_to_timeval(r);
+ return t.tv_sec * 1000 + t.tv_usec / 1000;
+ } else
+ return 0;
+}
+
+static enum hrtimer_restart isa1200_vib_timer_func(struct hrtimer *timer)
+{
+ struct isa1200_chip *haptic = container_of(timer, struct isa1200_chip,
+ timer);
+ haptic->enable = 0;
+ schedule_work(&haptic->work);
+
+ return HRTIMER_NORESTART;
+}
+
+static void dump_isa1200_reg(char *str, struct i2c_client *client)
+{
+ pr_debug("%s reg0x%x=0x%x, reg0x%x=0x%x, reg0x%x=0x%x\n", str,
+ ISA1200_HCTRL0, isa1200_read_reg(client, ISA1200_HCTRL0),
+ ISA1200_HCTRL1, isa1200_read_reg(client, ISA1200_HCTRL1),
+ ISA1200_HCTRL5, isa1200_read_reg(client, ISA1200_HCTRL5));
+}
+
+static int isa1200_setup(struct i2c_client *client)
+{
+ struct isa1200_chip *haptic = i2c_get_clientdata(client);
+ int value, temp, rc;
+
+ gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+ udelay(250);
+ gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
+
+ value = (haptic->pdata->smart_en << 3) |
+ (haptic->pdata->is_erm << 5) |
+ (haptic->pdata->ext_clk_en << 7);
+
+ rc = isa1200_write_reg(client, ISA1200_HCTRL1, value);
+ if (rc < 0) {
+ pr_err("%s: i2c write failure\n", __func__);
+ return rc;
+ }
+
+ if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+ temp = haptic->pdata->pwm_fd.pwm_div;
+ if (temp < 128 || temp > 1024 || temp % 128) {
+ pr_err("%s: Invalid divider\n", __func__);
+ goto reset_hctrl1;
+ }
+ value = ((temp >> 7) - 1);
+ } else if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+ temp = haptic->pdata->pwm_fd.pwm_freq;
+ if (temp < 22400 || temp > 172600 || temp % 22400) {
+ pr_err("%s: Invalid frequency\n", __func__);
+ goto reset_hctrl1;
+ }
+ value = ((temp / 22400) - 1);
+ haptic->period_ns = NSEC_PER_SEC / temp;
+ }
+
+ value |= (haptic->pdata->mode_ctrl << 3) |
+ (haptic->pdata->overdrive_high << 5) |
+ (haptic->pdata->overdrive_en << 5) |
+ (haptic->pdata->chip_en << 7);
+
+ rc = isa1200_write_reg(client, ISA1200_HCTRL0, value);
+ if (rc < 0) {
+ pr_err("%s: i2c write failure\n", __func__);
+ goto reset_hctrl1;
+ }
+
+ dump_isa1200_reg("new:", client);
+ return 0;
+
+reset_hctrl1:
+ i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+ ISA1200_HCTRL1_RESET);
+ return rc;
+}
+
+static int __devinit isa1200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct isa1200_chip *haptic;
+ struct isa1200_platform_data *pdata;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "%s: no support for i2c read/write "
+ "byte data\n", __func__);
+ return -EIO;
+ }
+
+ pdata = client->dev.platform_data;
+ if (!pdata) {
+ dev_err(&client->dev, "%s: no platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pdata->dev_setup) {
+ ret = pdata->dev_setup(true);
+ if (ret < 0) {
+ dev_err(&client->dev, "dev setup failed\n");
+ return -EINVAL;
+ }
+ }
+
+ haptic = kzalloc(sizeof(struct isa1200_chip), GFP_KERNEL);
+ if (!haptic) {
+ ret = -ENOMEM;
+ goto mem_alloc_fail;
+ }
+ haptic->client = client;
+ haptic->enable = 0;
+ haptic->pdata = pdata;
+
+ if (pdata->power_on) {
+ ret = pdata->power_on(1);
+ if (ret) {
+ dev_err(&client->dev, "%s: power-up failed\n",
+ __func__);
+ goto pwr_up_fail;
+ }
+ }
+
+ spin_lock_init(&haptic->lock);
+ INIT_WORK(&haptic->work, isa1200_chip_work);
+
+ hrtimer_init(&haptic->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ haptic->timer.function = isa1200_vib_timer_func;
+
+ /*register with timed output class*/
+ haptic->dev.name = pdata->name;
+ haptic->dev.get_time = isa1200_chip_get_time;
+ haptic->dev.enable = isa1200_chip_enable;
+ ret = timed_output_dev_register(&haptic->dev);
+ if (ret < 0)
+ goto timed_reg_fail;
+
+ i2c_set_clientdata(client, haptic);
+
+ ret = gpio_is_valid(pdata->hap_en_gpio);
+ if (ret) {
+ ret = gpio_request(pdata->hap_en_gpio, "haptic_gpio");
+ if (ret) {
+ dev_err(&client->dev, "%s: gpio %d request failed\n",
+ __func__, pdata->hap_en_gpio);
+ goto gpio_fail;
+ }
+ } else {
+ dev_err(&client->dev, "%s: Invalid gpio %d\n", __func__,
+ pdata->hap_en_gpio);
+ goto gpio_fail;
+ }
+
+ ret = isa1200_setup(client);
+ if (ret) {
+ dev_err(&client->dev, "%s: setup fail %d\n", __func__, ret);
+ goto gpio_fail;
+ }
+
+ if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+ haptic->pwm = pwm_request(pdata->pwm_ch_id, id->name);
+ if (IS_ERR(haptic->pwm)) {
+ dev_err(&client->dev, "%s: pwm request failed\n",
+ __func__);
+ ret = PTR_ERR(haptic->pwm);
+ goto reset_hctrl0;
+ }
+ }
+
+ printk(KERN_INFO "%s: %s registered\n", __func__, id->name);
+ return 0;
+
+reset_hctrl0:
+ i2c_smbus_write_byte_data(client, ISA1200_HCTRL0,
+ ISA1200_HCTRL0_RESET);
+gpio_fail:
+ timed_output_dev_unregister(&haptic->dev);
+timed_reg_fail:
+ if (pdata->power_on)
+ pdata->power_on(0);
+pwr_up_fail:
+ kfree(haptic);
+mem_alloc_fail:
+ if (pdata->dev_setup)
+ pdata->dev_setup(false);
+ return ret;
+}
+
+static int __devexit isa1200_remove(struct i2c_client *client)
+{
+ struct isa1200_chip *haptic = i2c_get_clientdata(client);
+
+ hrtimer_cancel(&haptic->timer);
+ cancel_work_sync(&haptic->work);
+
+ /* turn-off current vibration */
+ isa1200_vib_set(haptic, 0);
+
+ if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+ pwm_free(haptic->pwm);
+
+ timed_output_dev_unregister(&haptic->dev);
+ gpio_free(haptic->pdata->hap_en_gpio);
+
+ /* reset hardware registers */
+ i2c_smbus_write_byte_data(client, ISA1200_HCTRL0,
+ ISA1200_HCTRL0_RESET);
+ i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+ ISA1200_HCTRL1_RESET);
+
+ if (haptic->pdata->dev_setup)
+ haptic->pdata->dev_setup(false);
+
+ /* power-off the chip */
+ if (haptic->pdata->power_on)
+ haptic->pdata->power_on(0);
+
+ kfree(haptic);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int isa1200_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct isa1200_chip *haptic = i2c_get_clientdata(client);
+ int ret;
+
+ hrtimer_cancel(&haptic->timer);
+ cancel_work_sync(&haptic->work);
+ /* turn-off current vibration */
+ isa1200_vib_set(haptic, 0);
+
+ if (haptic->pdata->power_on) {
+ ret = haptic->pdata->power_on(0);
+ if (ret) {
+ dev_err(&client->dev, "power-down failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int isa1200_resume(struct i2c_client *client)
+{
+ struct isa1200_chip *haptic = i2c_get_clientdata(client);
+ int ret;
+
+ if (haptic->pdata->power_on) {
+ ret = haptic->pdata->power_on(1);
+ if (ret) {
+ dev_err(&client->dev, "power-up failed\n");
+ return ret;
+ }
+ }
+
+ isa1200_setup(client);
+ return 0;
+}
+#else
+#define isa1200_suspend NULL
+#define isa1200_resume NULL
+#endif
+
+static const struct i2c_device_id isa1200_id[] = {
+ { "isa1200_1", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, isa1200_id);
+
+static struct i2c_driver isa1200_driver = {
+ .driver = {
+ .name = "isa1200",
+ },
+ .probe = isa1200_probe,
+ .remove = __devexit_p(isa1200_remove),
+ .suspend = isa1200_suspend,
+ .resume = isa1200_resume,
+ .id_table = isa1200_id,
+};
+
+static int __init isa1200_init(void)
+{
+ return i2c_add_driver(&isa1200_driver);
+}
+
+static void __exit isa1200_exit(void)
+{
+ i2c_del_driver(&isa1200_driver);
+}
+
+module_init(isa1200_init);
+module_exit(isa1200_exit);
+
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("ISA1200 Haptic Motor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/msm_migrate_pages.c b/drivers/misc/msm_migrate_pages.c
new file mode 100644
index 0000000..df7af5f
--- /dev/null
+++ b/drivers/misc/msm_migrate_pages.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <mach/msm_migrate_pages.h>
+
+static unsigned long unstable_memory_state;
+
+unsigned long get_msm_migrate_pages_status(void)
+{
+ return unstable_memory_state;
+}
+EXPORT_SYMBOL(get_msm_migrate_pages_status);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int migrate_pages_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ int ret = 0;
+
+ switch (action) {
+ case MEM_ONLINE:
+ unstable_memory_state = action;
+ break;
+ case MEM_OFFLINE:
+ unstable_memory_state = action;
+ break;
+ case MEM_GOING_OFFLINE:
+ case MEM_GOING_ONLINE:
+ case MEM_CANCEL_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ break;
+ }
+ return ret;
+}
+#endif
+
+static int __devinit msm_migrate_pages_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+ hotplug_memory_notifier(migrate_pages_callback, 0);
+#endif
+ unstable_memory_state = 0;
+ return 0;
+}
+
+static struct platform_driver msm_migrate_pages_driver = {
+ .probe = msm_migrate_pages_probe,
+ .driver = {
+ .name = "msm_migrate_pages",
+ },
+};
+
+static int __init msm_migrate_pages_init(void)
+{
+ return platform_driver_register(&msm_migrate_pages_driver);
+}
+
+static void __exit msm_migrate_pages_exit(void)
+{
+ platform_driver_unregister(&msm_migrate_pages_driver);
+}
+
+module_init(msm_migrate_pages_init);
+module_exit(msm_migrate_pages_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Get Status of Unstable Memory Region");
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
index abb73c1..5063551 100644
--- a/drivers/misc/pmem.c
+++ b/drivers/misc/pmem.c
@@ -1,6 +1,7 @@
/* drivers/android/pmem.c
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -19,20 +20,34 @@
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
-#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/android_pmem.h>
#include <linux/mempolicy.h>
-#include <linux/sched.h>
+#include <linux/kobject.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+#include <linux/pm_runtime.h>
+#include <linux/memory_alloc.h>
-#define PMEM_MAX_DEVICES 10
-#define PMEM_MAX_ORDER 128
+#define PMEM_MAX_DEVICES (10)
+
+#define PMEM_MAX_ORDER (128)
#define PMEM_MIN_ALLOC PAGE_SIZE
+#define PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS (64)
+
+#define PMEM_32BIT_WORD_ORDER (5)
+#define PMEM_BITS_PER_WORD_MASK (BITS_PER_LONG - 1)
+
+#ifdef CONFIG_ANDROID_PMEM_DEBUG
#define PMEM_DEBUG 1
+#else
+#define PMEM_DEBUG 0
+#endif
+
+#define SYSTEM_ALLOC_RETRY 10
/* indicates that a refernce to this file has been taken via get_pmem_file,
* the file should not be released until put_pmem_file is called */
@@ -50,7 +65,6 @@
#define PMEM_FLAGS_SUBMAP 0x1 << 3
#define PMEM_FLAGS_UNSUBMAP 0x1 << 4
-
struct pmem_data {
/* in alloc mode: an index into the bitmap
* in no_alloc mode: the size of the allocation */
@@ -93,13 +107,28 @@
#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt,args...) \
- do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+ do { pr_debug("[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
##args); } \
while (0)
#else
#define DLOG(x...) do {} while (0)
#endif
+enum pmem_align {
+ PMEM_ALIGN_4K,
+ PMEM_ALIGN_1M,
+};
+
+#define PMEM_NAME_SIZE 16
+
+struct alloc_list {
+ void *addr; /* physical addr of allocation */
+ void *aaddr; /* aligned physical addr */
+ unsigned int size; /* total size of allocation */
+ unsigned char __iomem *vaddr; /* Virtual addr */
+ struct list_head allocs;
+};
+
struct pmem_info {
struct miscdevice dev;
/* physical start address of the remaped pmem space */
@@ -112,62 +141,113 @@
unsigned long num_entries;
/* pfn of the garbage page in memory */
unsigned long garbage_pfn;
+ /* which memory type (i.e. SMI, EBI1) this PMEM device is backed by */
+ unsigned memory_type;
+
+ char name[PMEM_NAME_SIZE];
+
/* index of the garbage page in the pmem space */
int garbage_index;
- /* the bitmap for the region indicating which entries are allocated
- * and which are free */
- struct pmem_bits *bitmap;
- /* indicates the region should not be managed with an allocator */
- unsigned no_allocator;
+
+ enum pmem_allocator_type allocator_type;
+
+ int (*allocate)(const int,
+ const unsigned long,
+ const unsigned int);
+ int (*free)(int, int);
+ int (*free_space)(int, struct pmem_freespace *);
+ unsigned long (*len)(int, struct pmem_data *);
+ unsigned long (*start_addr)(int, struct pmem_data *);
+
+ /* actual size of memory element, e.g.: (4 << 10) is 4K */
+ unsigned int quantum;
+
/* indicates maps of this region should be cached, if a mix of
* cached and uncached is desired, set this and open the device with
* O_SYNC to get an uncached region */
unsigned cached;
unsigned buffered;
- /* in no_allocator mode the first mapper gets the whole space and sets
- * this flag */
- unsigned allocated;
+ union {
+ struct {
+ /* in all_or_nothing allocator mode the first mapper
+ * gets the whole space and sets this flag */
+ unsigned allocated;
+ } all_or_nothing;
+
+ struct {
+ /* the buddy allocator bitmap for the region
+ * indicating which entries are allocated and which
+ * are free.
+ */
+
+ struct pmem_bits *buddy_bitmap;
+ } buddy_bestfit;
+
+ struct {
+ unsigned int bitmap_free; /* # of zero bits/quanta */
+ uint32_t *bitmap;
+ int32_t bitmap_allocs;
+ struct {
+ short bit;
+ unsigned short quanta;
+ } *bitm_alloc;
+ } bitmap;
+
+ struct {
+ unsigned long used; /* Bytes currently allocated */
+ struct list_head alist; /* List of allocations */
+ } system_mem;
+ } allocator;
+
+ int id;
+ struct kobject kobj;
+
/* for debugging, creates a list of pmem file structs, the
- * data_list_lock should be taken before pmem_data->sem if both are
+ * data_list_mutex should be taken before pmem_data->sem if both are
* needed */
- struct mutex data_list_lock;
+ struct mutex data_list_mutex;
struct list_head data_list;
- /* pmem_sem protects the bitmap array
- * a write lock should be held when modifying entries in bitmap
- * a read lock should be held when reading data from bits or
- * dereferencing a pointer into bitmap
- *
- * pmem_data->sem protects the pmem data of a particular file
- * Many of the function that require the pmem_data->sem have a non-
- * locking version for when the caller is already holding that sem.
+ /* arena_mutex protects the global allocation arena
*
* IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
- * down(pmem_data->sem) => down(bitmap_sem)
+ * down(pmem_data->sem) => mutex_lock(arena_mutex)
*/
- struct rw_semaphore bitmap_sem;
+ struct mutex arena_mutex;
long (*ioctl)(struct file *, unsigned int, unsigned long);
int (*release)(struct inode *, struct file *);
};
+#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)
static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;
-#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
-#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
-#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
-#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
-#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
-#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
-#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
-#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
- PMEM_LEN(id, index))
-#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase)
-#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
- PMEM_LEN(id, index))
+#define PMEM_SYSFS_DIR_NAME "pmem_regions" /* under /sys/kernel/ */
+static struct kset *pmem_kset;
+
+#define PMEM_IS_FREE_BUDDY(id, index) \
+ (!(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].allocated))
+#define PMEM_BUDDY_ORDER(id, index) \
+ (pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].order)
+#define PMEM_BUDDY_INDEX(id, index) \
+ (index ^ (1 << PMEM_BUDDY_ORDER(id, index)))
+#define PMEM_BUDDY_NEXT_INDEX(id, index) \
+ (index + (1 << PMEM_BUDDY_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * pmem[id].quantum)
+#define PMEM_START_ADDR(id, index) \
+ (PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_BUDDY_LEN(id, index) \
+ ((1 << PMEM_BUDDY_ORDER(id, index)) * pmem[id].quantum)
+#define PMEM_END_ADDR(id, index) \
+ (PMEM_START_ADDR(id, index) + PMEM_LEN(id, index))
+#define PMEM_START_VADDR(id, index) \
+ (PMEM_OFFSET(id, index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) \
+ (PMEM_START_VADDR(id, index) + PMEM_LEN(id, index))
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
-#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
+#define PMEM_IS_SUBMAP(data) \
+ ((data->flags & PMEM_FLAGS_SUBMAP) && \
(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
static int pmem_release(struct inode *, struct file *);
@@ -182,79 +262,361 @@
.unlocked_ioctl = pmem_ioctl,
};
+#define PMEM_ATTR(_name, _mode, _show, _store) { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+struct pmem_attr {
+ struct attribute attr;
+ ssize_t(*show) (const int id, char * const);
+ ssize_t(*store) (const int id, const char * const, const size_t count);
+};
+#define to_pmem_attr(a) container_of(a, struct pmem_attr, attr)
+
+#define RW_PMEM_ATTR(name) \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IRUGO | S_IWUSR, show_pmem_## name, store_pmem_## name)
+
+#define RO_PMEM_ATTR(name) \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IRUGO, show_pmem_## name, NULL)
+
+#define WO_PMEM_ATTR(name) \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name)
+
+static ssize_t show_pmem(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct pmem_attr *a = to_pmem_attr(attr);
+ return a->show ? a->show(to_pmem_info_id(kobj), buf) : -EIO;
+}
+
+static ssize_t store_pmem(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pmem_attr *a = to_pmem_attr(attr);
+ return a->store ? a->store(to_pmem_info_id(kobj), buf, count) : -EIO;
+}
+
+static struct sysfs_ops pmem_ops = {
+ .show = show_pmem,
+ .store = store_pmem,
+};
+
+static ssize_t show_pmem_base(int id, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
+ pmem[id].base, pmem[id].base);
+}
+RO_PMEM_ATTR(base);
+
+static ssize_t show_pmem_size(int id, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
+ pmem[id].size, pmem[id].size);
+}
+RO_PMEM_ATTR(size);
+
+static ssize_t show_pmem_allocator_type(int id, char *buf)
+{
+ switch (pmem[id].allocator_type) {
+ case PMEM_ALLOCATORTYPE_ALLORNOTHING:
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "All or Nothing");
+ case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit");
+ case PMEM_ALLOCATORTYPE_BITMAP:
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap");
+ case PMEM_ALLOCATORTYPE_SYSTEM:
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap");
+ default:
+ return scnprintf(buf, PAGE_SIZE,
+ "??? Invalid allocator type (%d) for this region! "
+ "Something isn't right.\n",
+ pmem[id].allocator_type);
+ }
+}
+RO_PMEM_ATTR(allocator_type);
+
+static ssize_t show_pmem_mapped_regions(int id, char *buf)
+{
+ struct list_head *elt;
+ int ret;
+
+ ret = scnprintf(buf, PAGE_SIZE,
+ "pid #: mapped regions (offset, len) (offset,len)...\n");
+
+ mutex_lock(&pmem[id].data_list_mutex);
+ list_for_each(elt, &pmem[id].data_list) {
+ struct pmem_data *data =
+ list_entry(elt, struct pmem_data, list);
+ struct list_head *elt2;
+
+ down_read(&data->sem);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "pid %u:",
+ data->pid);
+ list_for_each(elt2, &data->region_list) {
+ struct pmem_region_node *region_node = list_entry(elt2,
+ struct pmem_region_node,
+ list);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "(%lx,%lx) ",
+ region_node->region.offset,
+ region_node->region.len);
+ }
+ up_read(&data->sem);
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ }
+ mutex_unlock(&pmem[id].data_list_mutex);
+ return ret;
+}
+RO_PMEM_ATTR(mapped_regions);
+
+#define PMEM_COMMON_SYSFS_ATTRS \
+ &pmem_attr_base.attr, \
+ &pmem_attr_size.attr, \
+ &pmem_attr_allocator_type.attr, \
+ &pmem_attr_mapped_regions.attr
+
+
+static ssize_t show_pmem_allocated(int id, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&pmem[id].arena_mutex);
+ ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+ pmem[id].allocator.all_or_nothing.allocated ?
+ "is allocated" : "is NOT allocated");
+ mutex_unlock(&pmem[id].arena_mutex);
+ return ret;
+}
+RO_PMEM_ATTR(allocated);
+
+static struct attribute *pmem_allornothing_attrs[] = {
+ PMEM_COMMON_SYSFS_ATTRS,
+
+ &pmem_attr_allocated.attr,
+
+ NULL
+};
+
+static struct kobj_type pmem_allornothing_ktype = {
+ .sysfs_ops = &pmem_ops,
+ .default_attrs = pmem_allornothing_attrs,
+};
+
+static ssize_t show_pmem_total_entries(int id, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", pmem[id].num_entries);
+}
+RO_PMEM_ATTR(total_entries);
+
+static ssize_t show_pmem_quantum_size(int id, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u (%#x)\n",
+ pmem[id].quantum, pmem[id].quantum);
+}
+RO_PMEM_ATTR(quantum_size);
+
+static ssize_t show_pmem_buddy_bitmap_dump(int id, char *buf)
+{
+ int ret, i;
+
+ mutex_lock(&pmem[id].data_list_mutex);
+ ret = scnprintf(buf, PAGE_SIZE, "index\torder\tlength\tallocated\n");
+
+ for (i = 0; i < pmem[id].num_entries && (PAGE_SIZE - ret);
+ i = PMEM_BUDDY_NEXT_INDEX(id, i))
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d\t%d\t%d\t%d\n",
+ i, PMEM_BUDDY_ORDER(id, i),
+ PMEM_BUDDY_LEN(id, i),
+ !PMEM_IS_FREE_BUDDY(id, i));
+
+ mutex_unlock(&pmem[id].data_list_mutex);
+ return ret;
+}
+RO_PMEM_ATTR(buddy_bitmap_dump);
+
+#define PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS \
+ &pmem_attr_quantum_size.attr, \
+ &pmem_attr_total_entries.attr
+
+static struct attribute *pmem_buddy_bestfit_attrs[] = {
+ PMEM_COMMON_SYSFS_ATTRS,
+
+ PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,
+
+ &pmem_attr_buddy_bitmap_dump.attr,
+
+ NULL
+};
+
+static struct kobj_type pmem_buddy_bestfit_ktype = {
+ .sysfs_ops = &pmem_ops,
+ .default_attrs = pmem_buddy_bestfit_attrs,
+};
+
+static ssize_t show_pmem_free_quanta(int id, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&pmem[id].arena_mutex);
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ pmem[id].allocator.bitmap.bitmap_free);
+ mutex_unlock(&pmem[id].arena_mutex);
+ return ret;
+}
+RO_PMEM_ATTR(free_quanta);
+
+static ssize_t show_pmem_bits_allocated(int id, char *buf)
+{
+ ssize_t ret;
+ unsigned int i;
+
+ mutex_lock(&pmem[id].arena_mutex);
+
+ ret = scnprintf(buf, PAGE_SIZE,
+ "id: %d\nbitnum\tindex\tquanta allocated\n", id);
+
+ for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
+ if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "%u\t%u\t%u\n",
+ i,
+ pmem[id].allocator.bitmap.bitm_alloc[i].bit,
+ pmem[id].allocator.bitmap.bitm_alloc[i].quanta
+ );
+
+ mutex_unlock(&pmem[id].arena_mutex);
+ return ret;
+}
+RO_PMEM_ATTR(bits_allocated);
+
+static struct attribute *pmem_bitmap_attrs[] = {
+ PMEM_COMMON_SYSFS_ATTRS,
+
+ PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,
+
+ &pmem_attr_free_quanta.attr,
+ &pmem_attr_bits_allocated.attr,
+
+ NULL
+};
+
+static struct attribute *pmem_system_attrs[] = {
+ PMEM_COMMON_SYSFS_ATTRS,
+
+ NULL
+};
+
+static struct kobj_type pmem_bitmap_ktype = {
+ .sysfs_ops = &pmem_ops,
+ .default_attrs = pmem_bitmap_attrs,
+};
+
+static struct kobj_type pmem_system_ktype = {
+ .sysfs_ops = &pmem_ops,
+ .default_attrs = pmem_system_attrs,
+};
+
static int get_id(struct file *file)
{
return MINOR(file->f_dentry->d_inode->i_rdev);
}
-int is_pmem_file(struct file *file)
+static char *get_name(struct file *file)
+{
+ int id = get_id(file);
+ return pmem[id].name;
+}
+
+static int is_pmem_file(struct file *file)
{
int id;
if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
return 0;
+
id = get_id(file);
- if (unlikely(id >= PMEM_MAX_DEVICES))
- return 0;
- if (unlikely(file->f_dentry->d_inode->i_rdev !=
- MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
- return 0;
- return 1;
+ return (unlikely(id >= PMEM_MAX_DEVICES ||
+ file->f_dentry->d_inode->i_rdev !=
+ MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ? 0 : 1;
}
static int has_allocation(struct file *file)
{
- struct pmem_data *data;
- /* check is_pmem_file first if not accessed via pmem_file_ops */
-
- if (unlikely(!file->private_data))
- return 0;
- data = (struct pmem_data *)file->private_data;
- if (unlikely(data->index < 0))
- return 0;
- return 1;
+ /* must be called with at least read lock held on
+ * ((struct pmem_data *)(file->private_data))->sem which
+ * means that file is guaranteed not to be NULL upon entry!!
+ * check is_pmem_file first if not accessed via pmem_file_ops */
+ struct pmem_data *pdata = file->private_data;
+ return pdata && pdata->index != -1;
}
static int is_master_owner(struct file *file)
{
struct file *master_file;
- struct pmem_data *data;
+ struct pmem_data *data = file->private_data;
int put_needed, ret = 0;
- if (!is_pmem_file(file) || !has_allocation(file))
+ if (!has_allocation(file))
return 0;
- data = (struct pmem_data *)file->private_data;
if (PMEM_FLAGS_MASTERMAP & data->flags)
return 1;
master_file = fget_light(data->master_fd, &put_needed);
if (master_file && data->master_file == master_file)
ret = 1;
- fput_light(master_file, put_needed);
+ if (master_file)
+ fput_light(master_file, put_needed);
return ret;
}
-static int pmem_free(int id, int index)
+static int pmem_free_all_or_nothing(int id, int index)
{
- /* caller should hold the write lock on pmem_sem! */
- int buddy, curr = index;
+ /* caller should hold the lock on arena_mutex! */
DLOG("index %d\n", index);
- if (pmem[id].no_allocator) {
- pmem[id].allocated = 0;
- return 0;
- }
+ pmem[id].allocator.all_or_nothing.allocated = 0;
+ return 0;
+}
+
+static int pmem_free_space_all_or_nothing(int id,
+ struct pmem_freespace *fs)
+{
+ /* caller should hold the lock on arena_mutex! */
+ fs->total = (unsigned long)
+ pmem[id].allocator.all_or_nothing.allocated == 0 ?
+ pmem[id].size : 0;
+
+ fs->largest = fs->total;
+ return 0;
+}
+
+
+static int pmem_free_buddy_bestfit(int id, int index)
+{
+ /* caller should hold the lock on arena_mutex! */
+ int curr = index;
+ DLOG("index %d\n", index);
+
+
/* clean up the bitmap, merging any buddies */
- pmem[id].bitmap[curr].allocated = 0;
+ pmem[id].allocator.buddy_bestfit.buddy_bitmap[curr].allocated = 0;
/* find a slots buddy Buddy# = Slot# ^ (1 << order)
* if the buddy is also free merge them
* repeat until the buddy is not free or end of the bitmap is reached
*/
do {
- buddy = PMEM_BUDDY_INDEX(id, curr);
- if (PMEM_IS_FREE(id, buddy) &&
- PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
- PMEM_ORDER(id, buddy)++;
- PMEM_ORDER(id, curr)++;
+ int buddy = PMEM_BUDDY_INDEX(id, curr);
+ if (buddy < pmem[id].num_entries &&
+ PMEM_IS_FREE_BUDDY(id, buddy) &&
+ PMEM_BUDDY_ORDER(id, buddy) ==
+ PMEM_BUDDY_ORDER(id, curr)) {
+ PMEM_BUDDY_ORDER(id, buddy)++;
+ PMEM_BUDDY_ORDER(id, curr)++;
curr = min(buddy, curr);
} else {
break;
@@ -264,43 +626,222 @@
return 0;
}
+
+static int pmem_free_space_buddy_bestfit(int id,
+ struct pmem_freespace *fs)
+{
+ /* caller should hold the lock on arena_mutex! */
+ int curr;
+ unsigned long size;
+ fs->total = 0;
+ fs->largest = 0;
+
+ for (curr = 0; curr < pmem[id].num_entries;
+ curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) {
+ if (PMEM_IS_FREE_BUDDY(id, curr)) {
+ size = PMEM_BUDDY_LEN(id, curr);
+ if (size > fs->largest)
+ fs->largest = size;
+ fs->total += size;
+ }
+ }
+ return 0;
+}
+
+
+static inline uint32_t start_mask(int bit_start)
+{
+ return (uint32_t)(~0) << (bit_start & PMEM_BITS_PER_WORD_MASK);
+}
+
+static inline uint32_t end_mask(int bit_end)
+{
+ return (uint32_t)(~0) >>
+ ((BITS_PER_LONG - bit_end) & PMEM_BITS_PER_WORD_MASK);
+}
+
+static inline int compute_total_words(int bit_end, int word_index)
+{
+ return ((bit_end + BITS_PER_LONG - 1) >>
+ PMEM_32BIT_WORD_ORDER) - word_index;
+}
+
+static void bitmap_bits_clear_all(uint32_t *bitp, int bit_start, int bit_end)
+{
+ int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;
+
+ total_words = compute_total_words(bit_end, word_index);
+ if (total_words > 0) {
+ if (total_words == 1) {
+ bitp[word_index] &=
+ ~(start_mask(bit_start) & end_mask(bit_end));
+ } else {
+ bitp[word_index++] &= ~start_mask(bit_start);
+ if (total_words > 2) {
+ int total_bytes;
+
+ total_words -= 2;
+ total_bytes = total_words << 2;
+
+ memset(&bitp[word_index], 0, total_bytes);
+ word_index += total_words;
+ }
+ bitp[word_index] &= ~end_mask(bit_end);
+ }
+ }
+}
+
+static int pmem_free_bitmap(int id, int bitnum)
+{
+ /* caller should hold the lock on arena_mutex! */
+ int i;
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+
+ DLOG("bitnum %d\n", bitnum);
+
+ for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) {
+ const int curr_bit =
+ pmem[id].allocator.bitmap.bitm_alloc[i].bit;
+
+ if (curr_bit == bitnum) {
+ const int curr_quanta =
+ pmem[id].allocator.bitmap.bitm_alloc[i].quanta;
+
+ bitmap_bits_clear_all(pmem[id].allocator.bitmap.bitmap,
+ curr_bit, curr_bit + curr_quanta);
+ pmem[id].allocator.bitmap.bitmap_free += curr_quanta;
+ pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
+ pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
+ return 0;
+ }
+ }
+ printk(KERN_ALERT "pmem: %s: Attempt to free unallocated index %d, id"
+ " %d, pid %d(%s)\n", __func__, bitnum, id, current->pid,
+ get_task_comm(currtask_name, current));
+
+ return -1;
+}
+
+static int pmem_free_system(int id, int index)
+{
+ /* caller should hold the lock on arena_mutex! */
+ struct alloc_list *item;
+
+ DLOG("index %d\n", index);
+ if (index != 0)
+ item = (struct alloc_list *)index;
+ else
+ return 0;
+
+ if (item->vaddr != NULL) {
+ iounmap(item->vaddr);
+ kfree(__va(item->addr));
+ list_del(&item->allocs);
+ kfree(item);
+ }
+
+ return 0;
+}
+
+static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs)
+{
+ int i, j;
+ int max_allocs = pmem[id].allocator.bitmap.bitmap_allocs;
+ int alloc_start = 0;
+ int next_alloc;
+ unsigned long size = 0;
+
+ fs->total = 0;
+ fs->largest = 0;
+
+ for (i = 0; i < max_allocs; i++) {
+
+ int alloc_quanta = 0;
+ int alloc_idx = 0;
+ next_alloc = pmem[id].num_entries;
+
+ /* Look for the lowest bit where next allocation starts */
+ for (j = 0; j < max_allocs; j++) {
+ const int curr_alloc = pmem[id].allocator.
+ bitmap.bitm_alloc[j].bit;
+ if (curr_alloc != -1) {
+ if (alloc_start == curr_alloc)
+ alloc_idx = j;
+ if (alloc_start >= curr_alloc)
+ continue;
+ if (curr_alloc < next_alloc)
+ next_alloc = curr_alloc;
+ }
+ }
+ alloc_quanta = pmem[id].allocator.bitmap.
+ bitm_alloc[alloc_idx].quanta;
+ size = (next_alloc - (alloc_start + alloc_quanta)) *
+ pmem[id].quantum;
+
+ if (size > fs->largest)
+ fs->largest = size;
+ fs->total += size;
+
+ if (next_alloc == pmem[id].num_entries)
+ break;
+ else
+ alloc_start = next_alloc;
+ }
+
+ return 0;
+}
+
+static int pmem_free_space_system(int id, struct pmem_freespace *fs)
+{
+ fs->total = pmem[id].size;
+ fs->largest = pmem[id].size;
+
+ return 0;
+}
+
static void pmem_revoke(struct file *file, struct pmem_data *data);
static int pmem_release(struct inode *inode, struct file *file)
{
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *data = file->private_data;
struct pmem_region_node *region_node;
struct list_head *elt, *elt2;
int id = get_id(file), ret = 0;
-
- mutex_lock(&pmem[id].data_list_lock);
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+ DLOG("releasing memory pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+ current->pid, get_task_comm(currtask_name, current),
+ file, file_count(file), get_name(file), id);
+ mutex_lock(&pmem[id].data_list_mutex);
/* if this file is a master, revoke all the memory in the connected
* files */
if (PMEM_FLAGS_MASTERMAP & data->flags) {
- struct pmem_data *sub_data;
list_for_each(elt, &pmem[id].data_list) {
- sub_data = list_entry(elt, struct pmem_data, list);
+ struct pmem_data *sub_data =
+ list_entry(elt, struct pmem_data, list);
+ int is_master;
+
down_read(&sub_data->sem);
- if (PMEM_IS_SUBMAP(sub_data) &&
- file == sub_data->master_file) {
- up_read(&sub_data->sem);
+ is_master = (PMEM_IS_SUBMAP(sub_data) &&
+ file == sub_data->master_file);
+ up_read(&sub_data->sem);
+
+ if (is_master)
pmem_revoke(file, sub_data);
- } else
- up_read(&sub_data->sem);
}
}
list_del(&data->list);
- mutex_unlock(&pmem[id].data_list_lock);
-
+ mutex_unlock(&pmem[id].data_list_mutex);
down_write(&data->sem);
- /* if its not a conencted file and it has an allocation, free it */
+ /* if it is not a connected file and it has an allocation, free it */
if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
- down_write(&pmem[id].bitmap_sem);
- ret = pmem_free(id, data->index);
- up_write(&pmem[id].bitmap_sem);
+ mutex_lock(&pmem[id].arena_mutex);
+ ret = pmem[id].free(id, data->index);
+ mutex_unlock(&pmem[id].arena_mutex);
}
/* if this file is a submap (mapped, connected file), downref the
@@ -333,15 +874,17 @@
struct pmem_data *data;
int id = get_id(file);
int ret = 0;
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
- DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
- /* setup file->private_data to indicate its unmapped */
- /* you can only open a pmem device one time */
- if (file->private_data != NULL)
- return -1;
+ DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+ current->pid, get_task_comm(currtask_name, current),
+ file, file_count(file), get_name(file), id);
data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
if (!data) {
- printk("pmem: unable to allocate memory for pmem metadata.");
+ printk(KERN_ALERT "pmem: %s: unable to allocate memory for "
+ "pmem metadata.", __func__);
return -1;
}
data->flags = 0;
@@ -359,17 +902,17 @@
file->private_data = data;
INIT_LIST_HEAD(&data->list);
- mutex_lock(&pmem[id].data_list_lock);
+ mutex_lock(&pmem[id].data_list_mutex);
list_add(&data->list, &pmem[id].data_list);
- mutex_unlock(&pmem[id].data_list_lock);
+ mutex_unlock(&pmem[id].data_list_mutex);
return ret;
}
-static unsigned long pmem_order(unsigned long len)
+static unsigned long pmem_order(unsigned long len, int id)
{
int i;
- len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
+ len = (len + pmem[id].quantum - 1)/pmem[id].quantum;
len--;
for (i = 0; i < sizeof(len)*8; i++)
if (len >> i == 0)
@@ -377,74 +920,385 @@
return i;
}
-static int pmem_allocate(int id, unsigned long len)
+static int pmem_allocator_all_or_nothing(const int id,
+ const unsigned long len,
+ const unsigned int align)
{
- /* caller should hold the write lock on pmem_sem! */
- /* return the corresponding pdata[] entry */
- int curr = 0;
- int end = pmem[id].num_entries;
- int best_fit = -1;
- unsigned long order = pmem_order(len);
-
- if (pmem[id].no_allocator) {
- DLOG("no allocator");
- if ((len > pmem[id].size) || pmem[id].allocated)
- return -1;
- pmem[id].allocated = 1;
- return len;
- }
-
- if (order > PMEM_MAX_ORDER)
+ /* caller should hold the lock on arena_mutex! */
+ DLOG("all or nothing\n");
+ if ((len > pmem[id].size) ||
+ pmem[id].allocator.all_or_nothing.allocated)
return -1;
+ pmem[id].allocator.all_or_nothing.allocated = 1;
+ return len;
+}
+
+static int pmem_allocator_buddy_bestfit(const int id,
+ const unsigned long len,
+ unsigned int align)
+{
+ /* caller should hold the lock on arena_mutex! */
+ int curr;
+ int best_fit = -1;
+ unsigned long order;
+
+ DLOG("buddy bestfit\n");
+ order = pmem_order(len, id);
+ if (order > PMEM_MAX_ORDER)
+ goto out;
+
DLOG("order %lx\n", order);
- /* look through the bitmap:
- * if you find a free slot of the correct order use it
- * otherwise, use the best fit (smallest with size > order) slot
+ /* Look through the bitmap.
+ * If a free slot of the correct order is found, use it.
+ * Otherwise, use the best fit (smallest with size > order) slot.
*/
- while (curr < end) {
- if (PMEM_IS_FREE(id, curr)) {
- if (PMEM_ORDER(id, curr) == (unsigned char)order) {
+ for (curr = 0;
+ curr < pmem[id].num_entries;
+ curr = PMEM_BUDDY_NEXT_INDEX(id, curr))
+ if (PMEM_IS_FREE_BUDDY(id, curr)) {
+ if (PMEM_BUDDY_ORDER(id, curr) ==
+ (unsigned char)order) {
/* set the not free bit and clear others */
best_fit = curr;
break;
}
- if (PMEM_ORDER(id, curr) > (unsigned char)order &&
+ if (PMEM_BUDDY_ORDER(id, curr) >
+ (unsigned char)order &&
(best_fit < 0 ||
- PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
+ PMEM_BUDDY_ORDER(id, curr) <
+ PMEM_BUDDY_ORDER(id, best_fit)))
best_fit = curr;
}
- curr = PMEM_NEXT_INDEX(id, curr);
- }
- /* if best_fit < 0, there are no suitable slots,
- * return an error
- */
+ /* if best_fit < 0, there are no suitable slots; return an error */
if (best_fit < 0) {
- printk("pmem: no space left to allocate!\n");
- return -1;
+#if PMEM_DEBUG
+ printk(KERN_ALERT "pmem: %s: no space left to allocate!\n",
+ __func__);
+#endif
+ goto out;
}
/* now partition the best fit:
* split the slot into 2 buddies of order - 1
* repeat until the slot is of the correct order
*/
- while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
+ while (PMEM_BUDDY_ORDER(id, best_fit) > (unsigned char)order) {
int buddy;
- PMEM_ORDER(id, best_fit) -= 1;
+ PMEM_BUDDY_ORDER(id, best_fit) -= 1;
buddy = PMEM_BUDDY_INDEX(id, best_fit);
- PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
+ PMEM_BUDDY_ORDER(id, buddy) = PMEM_BUDDY_ORDER(id, best_fit);
}
- pmem[id].bitmap[best_fit].allocated = 1;
+ pmem[id].allocator.buddy_bestfit.buddy_bitmap[best_fit].allocated = 1;
+out:
return best_fit;
}
-static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot)
+
+static inline unsigned long paddr_from_bit(const int id, const int bitnum)
+{
+ return pmem[id].base + pmem[id].quantum * bitnum;
+}
+
+static inline unsigned long bit_from_paddr(const int id,
+ const unsigned long paddr)
+{
+ return (paddr - pmem[id].base) / pmem[id].quantum;
+}
+
+static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end)
+{
+ int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;
+
+ total_words = compute_total_words(bit_end, word_index);
+ if (total_words > 0) {
+ if (total_words == 1) {
+ bitp[word_index] |=
+ (start_mask(bit_start) & end_mask(bit_end));
+ } else {
+ bitp[word_index++] |= start_mask(bit_start);
+ if (total_words > 2) {
+ int total_bytes;
+
+ total_words -= 2;
+ total_bytes = total_words << 2;
+
+ memset(&bitp[word_index], ~0, total_bytes);
+ word_index += total_words;
+ }
+ bitp[word_index] |= end_mask(bit_end);
+ }
+ }
+}
+
+static int
+bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc,
+ int total_bits, int spacing)
+{
+ int bit_start, last_bit, word_index;
+
+ if (num_bits_to_alloc <= 0)
+ return -1;
+
+ for (bit_start = 0; ;
+ bit_start = (last_bit +
+ (word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1)
+ & ~(spacing - 1)) {
+ int bit_end = bit_start + num_bits_to_alloc, total_words;
+
+ if (bit_end > total_bits)
+ return -1; /* out of contiguous memory */
+
+ word_index = bit_start >> PMEM_32BIT_WORD_ORDER;
+ total_words = compute_total_words(bit_end, word_index);
+
+ if (total_words <= 0)
+ return -1;
+
+ if (total_words == 1) {
+ last_bit = fls(bitp[word_index] &
+ (start_mask(bit_start) &
+ end_mask(bit_end)));
+ if (last_bit)
+ continue;
+ } else {
+ int end_word = word_index + (total_words - 1);
+ last_bit =
+ fls(bitp[word_index] & start_mask(bit_start));
+ if (last_bit)
+ continue;
+
+ for (word_index++;
+ word_index < end_word;
+ word_index++) {
+ last_bit = fls(bitp[word_index]);
+ if (last_bit)
+ break;
+ }
+ if (last_bit)
+ continue;
+
+ last_bit = fls(bitp[word_index] & end_mask(bit_end));
+ if (last_bit)
+ continue;
+ }
+ bitmap_bits_set_all(bitp, bit_start, bit_end);
+ return bit_start;
+ }
+ return -1;
+}
+
+static int reserve_quanta(const unsigned int quanta_needed,
+ const int id,
+ unsigned int align)
+{
+ /* alignment should be a valid power of 2 */
+ int ret = -1, start_bit = 0, spacing = 1;
+
+ /* Sanity check */
+ if (quanta_needed > pmem[id].allocator.bitmap.bitmap_free) {
+#if PMEM_DEBUG
+ printk(KERN_ALERT "pmem: %s: request (%d) too big for"
+ " available free (%d)\n", __func__, quanta_needed,
+ pmem[id].allocator.bitmap.bitmap_free);
+#endif
+ return -1;
+ }
+
+ start_bit = bit_from_paddr(id,
+ (pmem[id].base + align - 1) & ~(align - 1));
+ if (start_bit <= -1) {
+#if PMEM_DEBUG
+ printk(KERN_ALERT
+ "pmem: %s: bit_from_paddr fails for"
+ " %u alignment.\n", __func__, align);
+#endif
+ return -1;
+ }
+ spacing = align / pmem[id].quantum;
+ spacing = spacing > 1 ? spacing : 1;
+
+ ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap,
+ quanta_needed,
+ (pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum,
+ spacing);
+
+#if PMEM_DEBUG
+ if (ret < 0)
+ printk(KERN_ALERT "pmem: %s: not enough contiguous bits free "
+ "in bitmap! Region memory is either too fragmented or"
+ " request is too large for available memory.\n",
+ __func__);
+#endif
+
+ return ret;
+}
+
+static int pmem_allocator_bitmap(const int id,
+ const unsigned long len,
+ const unsigned int align)
+{
+ /* caller should hold the lock on arena_mutex! */
+ int bitnum, i;
+ unsigned int quanta_needed;
+
+ DLOG("bitmap id %d, len %ld, align %u\n", id, len, align);
+ if (!pmem[id].allocator.bitmap.bitm_alloc) {
+#if PMEM_DEBUG
+ printk(KERN_ALERT "pmem: bitm_alloc not present! id: %d\n",
+ id);
+#endif
+ return -1;
+ }
+
+ quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum;
+ DLOG("quantum size %u quanta needed %u free %u id %d\n",
+ pmem[id].quantum, quanta_needed,
+ pmem[id].allocator.bitmap.bitmap_free, id);
+
+ if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) {
+#if PMEM_DEBUG
+ printk(KERN_ALERT "pmem: memory allocation failure. "
+ "PMEM memory region exhausted, id %d."
+ " Unable to comply with allocation request.\n", id);
+#endif
+ return -1;
+ }
+
+ bitnum = reserve_quanta(quanta_needed, id, align);
+ if (bitnum == -1)
+ goto leave;
+
+ for (i = 0;
+ i < pmem[id].allocator.bitmap.bitmap_allocs &&
+ pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1;
+ i++)
+ ;
+
+ if (i >= pmem[id].allocator.bitmap.bitmap_allocs) {
+ void *temp;
+ int32_t new_bitmap_allocs =
+ pmem[id].allocator.bitmap.bitmap_allocs << 1;
+ int j;
+
+ if (!new_bitmap_allocs) { /* failed sanity check!! */
+#if PMEM_DEBUG
+ pr_alert("pmem: bitmap_allocs number"
+ " wrapped around to zero! Something "
+ "is VERY wrong.\n");
+#endif
+ return -1;
+ }
+
+ if (new_bitmap_allocs > pmem[id].num_entries) {
+ /* failed sanity check!! */
+#if PMEM_DEBUG
+ pr_alert("pmem: required bitmap_allocs"
+ " number exceeds maximum entries possible"
+ " for current quanta\n");
+#endif
+ return -1;
+ }
+
+ temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc,
+ new_bitmap_allocs *
+ sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
+ GFP_KERNEL);
+ if (!temp) {
+#if PMEM_DEBUG
+ pr_alert("pmem: can't realloc bitmap_allocs,"
+ " id %d, current num bitmap allocs %d\n",
+ id, pmem[id].allocator.bitmap.bitmap_allocs);
+#endif
+ return -1;
+ }
+ pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs;
+ pmem[id].allocator.bitmap.bitm_alloc = temp;
+
+ for (j = i; j < new_bitmap_allocs; j++) {
+ pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1;
+ pmem[id].allocator.bitmap.bitm_alloc[j].quanta = 0;
+ }
+
+ DLOG("increased # of allocated regions to %d for id %d\n",
+ pmem[id].allocator.bitmap.bitmap_allocs, id);
+ }
+
+ DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i);
+
+ pmem[id].allocator.bitmap.bitmap_free -= quanta_needed;
+ pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum;
+ pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed;
+leave:
+ return bitnum;
+}
+
+static int pmem_allocator_system(const int id,
+ const unsigned long len,
+ const unsigned int align)
+{
+ /* caller should hold the lock on arena_mutex! */
+ struct alloc_list *list;
+ unsigned long aligned_len;
+ int count = SYSTEM_ALLOC_RETRY;
+ void *buf;
+
+ DLOG("system id %d, len %ld, align %u\n", id, len, align);
+
+ if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) {
+ DLOG("requested size would be larger than quota\n");
+ return -1;
+ }
+
+ /* Handle alignment */
+ aligned_len = len + align;
+
+ /* Attempt allocation */
+ list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL);
+ if (list == NULL) {
+ printk(KERN_ERR "pmem: failed to allocate system metadata\n");
+ return -1;
+ }
+ list->vaddr = NULL;
+
+ buf = NULL;
+ while ((buf == NULL) && count--) {
+ buf = kmalloc((aligned_len), GFP_KERNEL);
+ if (buf == NULL) {
+ DLOG("pmem: kmalloc %d temporarily failed len= %lu\n",
+ count, aligned_len);
+ }
+ }
+ if (!buf) {
+ printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %lu\n",
+ id, aligned_len);
+ kfree(list);
+ return -1;
+ }
+ list->size = aligned_len;
+ list->addr = (void *)__pa(buf);
+ list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) &
+ ~(align - 1));
+
+ if (!pmem[id].cached)
+ list->vaddr = ioremap(__pa(buf), aligned_len);
+ else
+ list->vaddr = ioremap_cached(__pa(buf), aligned_len);
+
+ INIT_LIST_HEAD(&list->allocs);
+ list_add(&list->allocs, &pmem[id].allocator.system_mem.alist);
+
+ return (int)list;
+}
+
+static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
{
int id = get_id(file);
-#ifdef pgprot_noncached
+#ifdef pgprot_writecombine
if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
- return pgprot_noncached(vma_prot);
+ /* on ARMv6 and ARMv7 this expands to Normal Noncached */
+ return pgprot_writecombine(vma_prot);
#endif
#ifdef pgprot_ext_buffered
else if (pmem[id].buffered)
@@ -453,26 +1307,80 @@
return vma_prot;
}
-static unsigned long pmem_start_addr(int id, struct pmem_data *data)
+static unsigned long pmem_start_addr_all_or_nothing(int id,
+ struct pmem_data *data)
{
- if (pmem[id].no_allocator)
- return PMEM_START_ADDR(id, 0);
- else
- return PMEM_START_ADDR(id, data->index);
+ return PMEM_START_ADDR(id, 0);
+}
+static unsigned long pmem_start_addr_buddy_bestfit(int id,
+ struct pmem_data *data)
+{
+ return PMEM_START_ADDR(id, data->index);
+}
+
+static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data)
+{
+ return data->index * pmem[id].quantum + pmem[id].base;
+}
+
+static unsigned long pmem_start_addr_system(int id, struct pmem_data *data)
+{
+ return (unsigned long)(((struct alloc_list *)(data->index))->aaddr);
}
static void *pmem_start_vaddr(int id, struct pmem_data *data)
{
- return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
+ if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM)
+ return ((struct alloc_list *)(data->index))->vaddr;
+ else
+ return pmem[id].start_addr(id, data) - pmem[id].base + pmem[id].vbase;
}
-static unsigned long pmem_len(int id, struct pmem_data *data)
+static unsigned long pmem_len_all_or_nothing(int id, struct pmem_data *data)
{
- if (pmem[id].no_allocator)
- return data->index;
- else
- return PMEM_LEN(id, data->index);
+ return data->index;
+}
+
+static unsigned long pmem_len_buddy_bestfit(int id, struct pmem_data *data)
+{
+ return PMEM_BUDDY_LEN(id, data->index);
+}
+
+static unsigned long pmem_len_bitmap(int id, struct pmem_data *data)
+{
+ int i;
+ unsigned long ret = 0;
+
+ mutex_lock(&pmem[id].arena_mutex);
+
+ for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
+ if (pmem[id].allocator.bitmap.bitm_alloc[i].bit ==
+ data->index) {
+ ret = pmem[id].allocator.bitmap.bitm_alloc[i].quanta *
+ pmem[id].quantum;
+ break;
+ }
+
+ mutex_unlock(&pmem[id].arena_mutex);
+#if PMEM_DEBUG
+ if (i >= pmem[id].allocator.bitmap.bitmap_allocs)
+ pr_alert("pmem: %s: can't find bitnum %d in "
+ "alloc'd array!\n", __func__, data->index);
+#endif
+ return ret;
+}
+
+static unsigned long pmem_len_system(int id, struct pmem_data *data)
+{
+ unsigned long ret = 0;
+
+ mutex_lock(&pmem[id].arena_mutex);
+
+ ret = ((struct alloc_list *)data->index)->size;
+ mutex_unlock(&pmem[id].arena_mutex);
+
+ return ret;
}
static int pmem_map_garbage(int id, struct vm_area_struct *vma,
@@ -509,18 +1417,25 @@
struct pmem_data *data, unsigned long offset,
unsigned long len)
{
+ int ret;
DLOG("map offset %lx len %lx\n", offset, len);
BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
- if (io_remap_pfn_range(vma, vma->vm_start + offset,
- (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
- len, vma->vm_page_prot)) {
- return -EAGAIN;
+ ret = io_remap_pfn_range(vma, vma->vm_start + offset,
+ (pmem[id].start_addr(id, data) + offset) >> PAGE_SHIFT,
+ len, vma->vm_page_prot);
+ if (ret) {
+#if PMEM_DEBUG
+ pr_alert("pmem: %s: io_remap_pfn_range fails with "
+ "return value: %d!\n", __func__, ret);
+#endif
+
+ ret = -EAGAIN;
}
- return 0;
+ return ret;
}
static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
@@ -538,13 +1453,21 @@
struct file *file = vma->vm_file;
struct pmem_data *data = file->private_data;
int id = get_id(file);
+
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+ DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
+ get_name(file), id, current->pid,
+ get_task_comm(currtask_name, current),
+ current->parent->pid, file, file_count(file));
/* this should never be called as we don't support copying pmem
* ranges via fork */
+ down_read(&data->sem);
BUG_ON(!has_allocation(file));
- down_write(&data->sem);
/* remap the garbage pages, forkers don't get access to the data */
pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end);
- up_write(&data->sem);
+ up_read(&data->sem);
}
static void pmem_vma_close(struct vm_area_struct *vma)
@@ -552,15 +1475,29 @@
struct file *file = vma->vm_file;
struct pmem_data *data = file->private_data;
- DLOG("current %u ppid %u file %p count %d\n", current->pid,
- current->parent->pid, file, file_count(file));
- if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
- printk(KERN_WARNING "pmem: something is very wrong, you are "
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+ DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
+ get_name(file), get_id(file), current->pid,
+ get_task_comm(currtask_name, current),
+ current->parent->pid, file, file_count(file));
+
+ if (unlikely(!is_pmem_file(file))) {
+ pr_warning("pmem: something is very wrong, you are "
"closing a vm backing an allocation that doesn't "
"exist!\n");
return;
}
+
down_write(&data->sem);
+ if (unlikely(!has_allocation(file))) {
+ up_write(&data->sem);
+ pr_warning("pmem: something is very wrong, you are "
+ "closing a vm backing an allocation that doesn't "
+ "exist!\n");
+ return;
+ }
if (data->vma == vma) {
data->vma = NULL;
if ((data->flags & PMEM_FLAGS_CONNECTED) &&
@@ -578,64 +1515,78 @@
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct pmem_data *data;
+ struct pmem_data *data = file->private_data;
int index;
unsigned long vma_size = vma->vm_end - vma->vm_start;
int ret = 0, id = get_id(file);
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+ if (!data) {
+ pr_err("pmem: Invalid file descriptor, no private data\n");
+ return -EINVAL;
+ }
+ DLOG("pid %u(%s) mmap vma_size %lu on dev %s(id: %d)\n", current->pid,
+ get_task_comm(currtask_name, current), vma_size,
+ get_name(file), id);
if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
- printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
+ pr_err("pmem: mmaps must be at offset zero, aligned"
" and a multiple of pages_size.\n");
#endif
return -EINVAL;
}
- data = (struct pmem_data *)file->private_data;
down_write(&data->sem);
/* check this file isn't already mmaped, for submaps check this file
* has never been mmaped */
if ((data->flags & PMEM_FLAGS_SUBMAP) ||
(data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
- printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
+ pr_err("pmem: you can only mmap a pmem file once, "
"this file is already mmaped. %x\n", data->flags);
#endif
ret = -EINVAL;
goto error;
}
/* if file->private_data == unalloced, alloc*/
- if (data && data->index == -1) {
- down_write(&pmem[id].bitmap_sem);
- index = pmem_allocate(id, vma->vm_end - vma->vm_start);
- up_write(&pmem[id].bitmap_sem);
+ if (data->index == -1) {
+ mutex_lock(&pmem[id].arena_mutex);
+ index = pmem[id].allocate(id,
+ vma->vm_end - vma->vm_start,
+ SZ_4K);
+ mutex_unlock(&pmem[id].arena_mutex);
+ /* either no space was available or an error occured */
+ if (index == -1) {
+ pr_err("pmem: mmap unable to allocate memory"
+ " on %s\n", get_name(file));
+ ret = -ENOMEM;
+ goto error;
+ }
+ /* store the index of a successful allocation */
data->index = index;
}
- /* either no space was available or an error occured */
- if (!has_allocation(file)) {
- ret = -EINVAL;
- printk("pmem: could not find allocation for map.\n");
- goto error;
- }
- if (pmem_len(id, data) < vma_size) {
+ if (pmem[id].len(id, data) < vma_size) {
#if PMEM_DEBUG
- printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
- "size of backing region [%lu].\n", vma_size,
- pmem_len(id, data));
+ pr_err("pmem: mmap size [%lu] does not match"
+ " size of backing region [%lu].\n", vma_size,
+ pmem[id].len(id, data));
#endif
ret = -EINVAL;
goto error;
}
- vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
- vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot);
+ vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT;
+
+ vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot);
if (data->flags & PMEM_FLAGS_CONNECTED) {
struct pmem_region_node *region_node;
struct list_head *elt;
if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
- printk("pmem: mmap failed in kernel!\n");
+ pr_alert("pmem: mmap failed in kernel!\n");
ret = -EAGAIN;
goto error;
}
@@ -663,7 +1614,7 @@
current->pid);
} else {
if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
- printk(KERN_INFO "pmem: mmap failed in kernel!\n");
+ pr_err("pmem: mmap failed in kernel!\n");
ret = -EAGAIN;
goto error;
}
@@ -681,103 +1632,155 @@
int get_pmem_user_addr(struct file *file, unsigned long *start,
unsigned long *len)
{
- struct pmem_data *data;
- if (!is_pmem_file(file) || !has_allocation(file)) {
+ int ret = -1;
+
+ if (is_pmem_file(file)) {
+ struct pmem_data *data = file->private_data;
+
+ down_read(&data->sem);
+ if (has_allocation(file)) {
+ if (data->vma) {
+ *start = data->vma->vm_start;
+ *len = data->vma->vm_end - data->vma->vm_start;
+ } else {
+ *start = *len = 0;
#if PMEM_DEBUG
- printk(KERN_INFO "pmem: requested pmem data from invalid"
- "file.\n");
+ pr_err("pmem: %s: no vma present.\n",
+ __func__);
#endif
- return -1;
+ }
+ ret = 0;
+ }
+ up_read(&data->sem);
}
- data = (struct pmem_data *)file->private_data;
- down_read(&data->sem);
- if (data->vma) {
- *start = data->vma->vm_start;
- *len = data->vma->vm_end - data->vma->vm_start;
- } else {
- *start = 0;
- *len = 0;
- }
- up_read(&data->sem);
- return 0;
+
+#if PMEM_DEBUG
+ if (ret)
+ pr_err("pmem: %s: requested pmem data from invalid"
+ " file.\n", __func__);
+#endif
+ return ret;
}
int get_pmem_addr(struct file *file, unsigned long *start,
unsigned long *vstart, unsigned long *len)
{
- struct pmem_data *data;
- int id;
+ int ret = -1;
- if (!is_pmem_file(file) || !has_allocation(file)) {
- return -1;
- }
+ if (is_pmem_file(file)) {
+ struct pmem_data *data = file->private_data;
- data = (struct pmem_data *)file->private_data;
- if (data->index == -1) {
+ down_read(&data->sem);
+ if (has_allocation(file)) {
+ int id = get_id(file);
+
+ *start = pmem[id].start_addr(id, data);
+ *len = pmem[id].len(id, data);
+ *vstart = (unsigned long)
+ pmem_start_vaddr(id, data);
+ up_read(&data->sem);
#if PMEM_DEBUG
- printk(KERN_INFO "pmem: requested pmem data from file with no "
- "allocation.\n");
- return -1;
+ down_write(&data->sem);
+ data->ref++;
+ up_write(&data->sem);
#endif
+ DLOG("returning start %#lx len %lu "
+ "vstart %#lx\n",
+ *start, *len, *vstart);
+ ret = 0;
+ } else {
+ up_read(&data->sem);
+ }
}
- id = get_id(file);
-
- down_read(&data->sem);
- *start = pmem_start_addr(id, data);
- *len = pmem_len(id, data);
- *vstart = (unsigned long)pmem_start_vaddr(id, data);
- up_read(&data->sem);
-#if PMEM_DEBUG
- down_write(&data->sem);
- data->ref++;
- up_write(&data->sem);
-#endif
- return 0;
+ return ret;
}
-int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
+int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
unsigned long *len, struct file **filp)
{
- struct file *file;
+ int ret = -1;
+ struct file *file = fget(fd);
- file = fget(fd);
if (unlikely(file == NULL)) {
- printk(KERN_INFO "pmem: requested data from file descriptor "
- "that doesn't exist.");
- return -1;
+ pr_err("pmem: %s: requested data from file "
+ "descriptor that doesn't exist.\n", __func__);
+ } else {
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+ DLOG("filp %p rdev %d pid %u(%s) file %p(%ld)"
+ " dev %s(id: %d)\n", filp,
+ file->f_dentry->d_inode->i_rdev,
+ current->pid, get_task_comm(currtask_name, current),
+ file, file_count(file), get_name(file), get_id(file));
+
+ if (!get_pmem_addr(file, start, vstart, len)) {
+ if (filp)
+ *filp = file;
+ ret = 0;
+ } else {
+ fput(file);
+ }
}
-
- if (get_pmem_addr(file, start, vstart, len))
- goto end;
-
- if (filp)
- *filp = file;
- return 0;
-end:
- fput(file);
- return -1;
+ return ret;
}
+EXPORT_SYMBOL(get_pmem_file);
+
+int get_pmem_fd(int fd, unsigned long *start, unsigned long *len)
+{
+ unsigned long vstart;
+ return get_pmem_file(fd, start, &vstart, len, NULL);
+}
+EXPORT_SYMBOL(get_pmem_fd);
void put_pmem_file(struct file *file)
{
- struct pmem_data *data;
- int id;
-
- if (!is_pmem_file(file))
- return;
- id = get_id(file);
- data = (struct pmem_data *)file->private_data;
-#if PMEM_DEBUG
- down_write(&data->sem);
- if (data->ref == 0) {
- printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
- pmem[id].dev.name, data->pid);
- BUG();
- }
- data->ref--;
- up_write(&data->sem);
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
- fput(file);
+ DLOG("rdev %d pid %u(%s) file %p(%ld)" " dev %s(id: %d)\n",
+ file->f_dentry->d_inode->i_rdev, current->pid,
+ get_task_comm(currtask_name, current), file,
+ file_count(file), get_name(file), get_id(file));
+ if (is_pmem_file(file)) {
+#if PMEM_DEBUG
+ struct pmem_data *data = file->private_data;
+
+ down_write(&data->sem);
+ if (!data->ref--) {
+ data->ref++;
+ pr_alert("pmem: pmem_put > pmem_get %s "
+ "(pid %d)\n",
+ pmem[get_id(file)].dev.name, data->pid);
+ BUG();
+ }
+ up_write(&data->sem);
+#endif
+ fput(file);
+ }
+}
+EXPORT_SYMBOL(put_pmem_file);
+
+void put_pmem_fd(int fd)
+{
+ int put_needed;
+ struct file *file = fget_light(fd, &put_needed);
+
+ if (file) {
+ put_pmem_file(file);
+ fput_light(file, put_needed);
+ }
+}
+
+void flush_pmem_fd(int fd, unsigned long offset, unsigned long len)
+{
+ int fput_needed;
+ struct file *file = fget_light(fd, &fput_needed);
+
+ if (file) {
+ flush_pmem_file(file, offset, len);
+ fput_light(file, fput_needed);
+ }
}
void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
@@ -788,21 +1791,50 @@
struct pmem_region_node *region_node;
struct list_head *elt;
void *flush_start, *flush_end;
-
- if (!is_pmem_file(file) || !has_allocation(file)) {
+#ifdef CONFIG_OUTER_CACHE
+ unsigned long phy_start, phy_end;
+#endif
+ if (!is_pmem_file(file))
return;
- }
id = get_id(file);
- data = (struct pmem_data *)file->private_data;
- if (!pmem[id].cached || file->f_flags & O_SYNC)
+ if (!pmem[id].cached)
return;
+ /* is_pmem_file fails if !file */
+ data = file->private_data;
+
down_read(&data->sem);
+ if (!has_allocation(file))
+ goto end;
+
vaddr = pmem_start_vaddr(id, data);
+
+ if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) {
+ dmac_flush_range(vaddr,
+ (void *)((unsigned long)vaddr +
+ ((struct alloc_list *)(data->index))->size));
+#ifdef CONFIG_OUTER_CACHE
+ phy_start = pmem_start_addr_system(id, data);
+
+ phy_end = phy_start +
+ ((struct alloc_list *)(data->index))->size;
+
+ outer_flush_range(phy_start, phy_end);
+#endif
+ goto end;
+ }
/* if this isn't a submmapped file, flush the whole thing */
if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
- dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
+ dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data));
+#ifdef CONFIG_OUTER_CACHE
+ phy_start = (unsigned long)vaddr -
+ (unsigned long)pmem[id].vbase + pmem[id].base;
+
+ phy_end = phy_start + pmem[id].len(id, data);
+
+ outer_flush_range(phy_start, phy_end);
+#endif
goto end;
}
/* otherwise, flush the region of the file we are drawing */
@@ -814,6 +1846,15 @@
flush_start = vaddr + region_node->region.offset;
flush_end = flush_start + region_node->region.len;
dmac_flush_range(flush_start, flush_end);
+#ifdef CONFIG_OUTER_CACHE
+
+ phy_start = (unsigned long)flush_start -
+ (unsigned long)pmem[id].vbase + pmem[id].base;
+
+ phy_end = phy_start + region_node->region.len;
+
+ outer_flush_range(phy_start, phy_end);
+#endif
break;
}
}
@@ -821,45 +1862,145 @@
up_read(&data->sem);
}
+int pmem_cache_maint(struct file *file, unsigned int cmd,
+ struct pmem_addr *pmem_addr)
+{
+ struct pmem_data *data;
+ int id;
+ unsigned long vaddr, paddr, length, offset,
+ pmem_len, pmem_start_addr;
+
+ /* Called from kernel-space so file may be NULL */
+ if (!file)
+ return -EBADF;
+
+ data = file->private_data;
+ id = get_id(file);
+
+ if (!pmem[id].cached)
+ return 0;
+
+ offset = pmem_addr->offset;
+ length = pmem_addr->length;
+
+ down_read(&data->sem);
+ if (!has_allocation(file)) {
+ up_read(&data->sem);
+ return -EINVAL;
+ }
+ pmem_len = pmem[id].len(id, data);
+ pmem_start_addr = pmem[id].start_addr(id, data);
+ up_read(&data->sem);
+
+ if (offset + length > pmem_len)
+ return -EINVAL;
+
+ vaddr = pmem_addr->vaddr;
+ paddr = pmem_start_addr + offset;
+
+ DLOG("pmem cache maint on dev %s(id: %d)"
+ "(vaddr %lx paddr %lx len %lu bytes)\n",
+ get_name(file), id, vaddr, paddr, length);
+ if (cmd == PMEM_CLEAN_INV_CACHES)
+ clean_and_invalidate_caches(vaddr,
+ length, paddr);
+ else if (cmd == PMEM_CLEAN_CACHES)
+ clean_caches(vaddr, length, paddr);
+ else if (cmd == PMEM_INV_CACHES)
+ invalidate_caches(vaddr, length, paddr);
+
+ return 0;
+}
+EXPORT_SYMBOL(pmem_cache_maint);
+
static int pmem_connect(unsigned long connect, struct file *file)
{
- struct pmem_data *data = (struct pmem_data *)file->private_data;
- struct pmem_data *src_data;
- struct file *src_file;
int ret = 0, put_needed;
+ struct file *src_file;
- down_write(&data->sem);
- /* retrieve the src file and check it is a pmem file with an alloc */
+ if (!file) {
+ pr_err("pmem: %s: NULL file pointer passed in, "
+ "bailing out!\n", __func__);
+ ret = -EINVAL;
+ goto leave;
+ }
+
src_file = fget_light(connect, &put_needed);
- DLOG("connect %p to %p\n", file, src_file);
+
if (!src_file) {
- printk("pmem: src file not found!\n");
- ret = -EINVAL;
- goto err_no_file;
+ pr_err("pmem: %s: src file not found!\n", __func__);
+ ret = -EBADF;
+ goto leave;
}
- if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
- printk(KERN_INFO "pmem: src file is not a pmem file or has no "
- "alloc!\n");
- ret = -EINVAL;
- goto err_bad_file;
- }
- src_data = (struct pmem_data *)src_file->private_data;
- if (has_allocation(file) && (data->index != src_data->index)) {
- printk("pmem: file is already mapped but doesn't match this"
- " src_file!\n");
+ if (src_file == file) { /* degenerative case, operator error */
+ pr_err("pmem: %s: src_file and passed in file are "
+ "the same; refusing to connect to self!\n", __func__);
ret = -EINVAL;
- goto err_bad_file;
+ goto put_src_file;
}
- data->index = src_data->index;
- data->flags |= PMEM_FLAGS_CONNECTED;
- data->master_fd = connect;
- data->master_file = src_file;
-err_bad_file:
+ if (unlikely(!is_pmem_file(src_file))) {
+ pr_err("pmem: %s: src file is not a pmem file!\n",
+ __func__);
+ ret = -EINVAL;
+ goto put_src_file;
+ } else {
+ struct pmem_data *src_data = src_file->private_data;
+
+ if (!src_data) {
+ pr_err("pmem: %s: src file pointer has no"
+ " private data, bailing out!\n", __func__);
+ ret = -EINVAL;
+ goto put_src_file;
+ }
+
+ down_read(&src_data->sem);
+
+ if (unlikely(!has_allocation(src_file))) {
+ up_read(&src_data->sem);
+ pr_err("pmem: %s: src file has no allocation!\n",
+ __func__);
+ ret = -EINVAL;
+ } else {
+ struct pmem_data *data;
+ int src_index = src_data->index;
+
+ up_read(&src_data->sem);
+
+ data = file->private_data;
+ if (!data) {
+ pr_err("pmem: %s: passed in file "
+ "pointer has no private data, bailing"
+ " out!\n", __func__);
+ ret = -EINVAL;
+ goto put_src_file;
+ }
+
+ down_write(&data->sem);
+ if (has_allocation(file) &&
+ (data->index != src_index)) {
+ up_write(&data->sem);
+
+ pr_err("pmem: %s: file is already "
+ "mapped but doesn't match this "
+ "src_file!\n", __func__);
+ ret = -EINVAL;
+ } else {
+ data->index = src_index;
+ data->flags |= PMEM_FLAGS_CONNECTED;
+ data->master_fd = connect;
+ data->master_file = src_file;
+
+ up_write(&data->sem);
+
+ DLOG("connect %p to %p\n", file, src_file);
+ }
+ }
+ }
+put_src_file:
fput_light(src_file, put_needed);
-err_no_file:
- up_write(&data->sem);
+leave:
return ret;
}
@@ -878,16 +2019,23 @@
{
int ret = 0;
struct mm_struct *mm = NULL;
+#if PMEM_DEBUG_MSGS
+ char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+ DLOG("pid %u(%s) file %p(%ld)\n",
+ current->pid, get_task_comm(currtask_name, current),
+ file, file_count(file));
+
*locked_mm = NULL;
lock_mm:
down_read(&data->sem);
if (PMEM_IS_SUBMAP(data)) {
mm = get_task_mm(data->task);
if (!mm) {
-#if PMEM_DEBUG
- printk("pmem: can't remap task is gone!\n");
-#endif
up_read(&data->sem);
+#if PMEM_DEBUG
+ pr_alert("pmem: can't remap - task is gone!\n");
+#endif
return -1;
}
}
@@ -902,7 +2050,7 @@
* once */
if (PMEM_IS_SUBMAP(data) && !mm) {
pmem_unlock_data_and_mm(data, mm);
- up_write(&data->sem);
+ DLOG("mapping contention, repeating mmap op\n");
goto lock_mm;
}
/* now check that vma.mm is still there, it could have been
@@ -916,6 +2064,9 @@
data->flags &= ~(PMEM_FLAGS_SUBMAP);
}
pmem_unlock_data_and_mm(data, mm);
+#if PMEM_DEBUG
+ pr_alert("pmem: vma.mm went away!\n");
+#endif
return -1;
}
*locked_mm = mm;
@@ -930,14 +2081,28 @@
struct mm_struct *mm = NULL;
struct list_head *elt, *elt2;
int id = get_id(file);
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ struct pmem_data *data;
+
+ DLOG("operation %#x, region offset %ld, region len %ld\n",
+ operation, region->offset, region->len);
+
+ if (!is_pmem_file(file)) {
+#if PMEM_DEBUG
+ pr_err("pmem: remap request for non-pmem file descriptor\n");
+#endif
+ return -EINVAL;
+ }
+
+ /* is_pmem_file fails if !file */
+ data = file->private_data;
/* pmem region must be aligned on a page boundry */
if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
!PMEM_IS_PAGE_ALIGNED(region->len))) {
#if PMEM_DEBUG
- printk("pmem: request for unaligned pmem suballocation "
- "%lx %lx\n", region->offset, region->len);
+ pr_err("pmem: request for unaligned pmem"
+ " suballocation %lx %lx\n",
+ region->offset, region->len);
#endif
return -EINVAL;
}
@@ -955,18 +2120,18 @@
* that back in it */
if (!is_master_owner(file)) {
#if PMEM_DEBUG
- printk("pmem: remap requested from non-master process\n");
+ pr_err("pmem: remap requested from non-master process\n");
#endif
ret = -EINVAL;
goto err;
}
/* check that the requested range is within the src allocation */
- if (unlikely((region->offset > pmem_len(id, data)) ||
- (region->len > pmem_len(id, data)) ||
- (region->offset + region->len > pmem_len(id, data)))) {
+ if (unlikely((region->offset > pmem[id].len(id, data)) ||
+ (region->len > pmem[id].len(id, data)) ||
+ (region->offset + region->len > pmem[id].len(id, data)))) {
#if PMEM_DEBUG
- printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
+ pr_err("pmem: suballoc doesn't fit in src_file!\n");
#endif
ret = -EINVAL;
goto err;
@@ -978,7 +2143,7 @@
if (!region_node) {
ret = -ENOMEM;
#if PMEM_DEBUG
- printk(KERN_INFO "No space to allocate metadata!");
+ pr_alert("pmem: No space to allocate remap metadata!");
#endif
goto err;
}
@@ -999,8 +2164,8 @@
}
if (!found) {
#if PMEM_DEBUG
- printk("pmem: Unmap region does not map any mapped "
- "region!");
+ pr_err("pmem: Unmap region does not map any"
+ " mapped region!");
#endif
ret = -EINVAL;
goto err;
@@ -1010,10 +2175,10 @@
if (data->vma && PMEM_IS_SUBMAP(data)) {
if (operation == PMEM_MAP)
ret = pmem_remap_pfn_range(id, data->vma, data,
- region->offset, region->len);
+ region->offset, region->len);
else if (operation == PMEM_UNMAP)
ret = pmem_unmap_pfn_range(id, data->vma, data,
- region->offset, region->len);
+ region->offset, region->len);
}
err:
@@ -1054,63 +2219,83 @@
static void pmem_get_size(struct pmem_region *region, struct file *file)
{
- struct pmem_data *data = (struct pmem_data *)file->private_data;
+ /* called via ioctl file op, so file guaranteed to be not NULL */
+ struct pmem_data *data = file->private_data;
int id = get_id(file);
+ down_read(&data->sem);
if (!has_allocation(file)) {
region->offset = 0;
region->len = 0;
- return;
} else {
- region->offset = pmem_start_addr(id, data);
- region->len = pmem_len(id, data);
+ region->offset = pmem[id].start_addr(id, data);
+ region->len = pmem[id].len(id, data);
}
- DLOG("offset %lx len %lx\n", region->offset, region->len);
+ up_read(&data->sem);
+ DLOG("offset 0x%lx len 0x%lx\n", region->offset, region->len);
}
static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct pmem_data *data;
+ /* called from user space as file op, so file guaranteed to be not
+ * NULL
+ */
+ struct pmem_data *data = file->private_data;
int id = get_id(file);
+#if PMEM_DEBUG_MSGS
+ char currtask_name[
+ FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+
+ DLOG("pid %u(%s) file %p(%ld) cmd %#x, dev %s(id: %d)\n",
+ current->pid, get_task_comm(currtask_name, current),
+ file, file_count(file), cmd, get_name(file), id);
switch (cmd) {
case PMEM_GET_PHYS:
{
struct pmem_region region;
+
DLOG("get_phys\n");
+ down_read(&data->sem);
if (!has_allocation(file)) {
region.offset = 0;
region.len = 0;
} else {
- data = (struct pmem_data *)file->private_data;
- region.offset = pmem_start_addr(id, data);
- region.len = pmem_len(id, data);
+ region.offset = pmem[id].start_addr(id, data);
+ region.len = pmem[id].len(id, data);
}
- printk(KERN_INFO "pmem: request for physical address of pmem region "
- "from process %d.\n", current->pid);
+ up_read(&data->sem);
+
if (copy_to_user((void __user *)arg, ®ion,
sizeof(struct pmem_region)))
return -EFAULT;
+
+ DLOG("pmem: successful request for "
+ "physical address of pmem region id %d, "
+ "offset 0x%lx, len 0x%lx\n",
+ id, region.offset, region.len);
+
break;
}
case PMEM_MAP:
{
struct pmem_region region;
+ DLOG("map\n");
if (copy_from_user(®ion, (void __user *)arg,
sizeof(struct pmem_region)))
return -EFAULT;
- data = (struct pmem_data *)file->private_data;
return pmem_remap(®ion, file, PMEM_MAP);
}
break;
case PMEM_UNMAP:
{
struct pmem_region region;
+ DLOG("unmap\n");
if (copy_from_user(®ion, (void __user *)arg,
sizeof(struct pmem_region)))
return -EFAULT;
- data = (struct pmem_data *)file->private_data;
return pmem_remap(®ion, file, PMEM_UNMAP);
break;
}
@@ -1136,169 +2321,369 @@
return -EFAULT;
break;
}
+ case PMEM_GET_FREE_SPACE:
+ {
+ struct pmem_freespace fs;
+ DLOG("get freespace on %s(id: %d)\n",
+ get_name(file), id);
+
+ mutex_lock(&pmem[id].arena_mutex);
+ pmem[id].free_space(id, &fs);
+ mutex_unlock(&pmem[id].arena_mutex);
+
+ DLOG("%s(id: %d) total free %lu, largest %lu\n",
+ get_name(file), id, fs.total, fs.largest);
+
+ if (copy_to_user((void __user *)arg, &fs,
+ sizeof(struct pmem_freespace)))
+ return -EFAULT;
+ break;
+ }
+
case PMEM_ALLOCATE:
{
- if (has_allocation(file))
+ int ret = 0;
+ DLOG("allocate, id %d\n", id);
+ down_write(&data->sem);
+ if (has_allocation(file)) {
+ pr_err("pmem: Existing allocation found on "
+ "this file descriptor\n");
+ up_write(&data->sem);
return -EINVAL;
- data = (struct pmem_data *)file->private_data;
- data->index = pmem_allocate(id, arg);
- break;
+ }
+
+ mutex_lock(&pmem[id].arena_mutex);
+ data->index = pmem[id].allocate(id,
+ arg,
+ SZ_4K);
+ mutex_unlock(&pmem[id].arena_mutex);
+ ret = data->index == -1 ? -ENOMEM :
+ data->index;
+ up_write(&data->sem);
+ return ret;
+ }
+ case PMEM_ALLOCATE_ALIGNED:
+ {
+ struct pmem_allocation alloc;
+ int ret = 0;
+
+ if (copy_from_user(&alloc, (void __user *)arg,
+ sizeof(struct pmem_allocation)))
+ return -EFAULT;
+ DLOG("allocate id align %d %u\n", id, alloc.align);
+ down_write(&data->sem);
+ if (has_allocation(file)) {
+ pr_err("pmem: Existing allocation found on "
+ "this file descriptor\n");
+ up_write(&data->sem);
+ return -EINVAL;
+ }
+
+ if (alloc.align & (alloc.align - 1)) {
+ pr_err("pmem: Alignment is not a power of 2\n");
+ return -EINVAL; /* FIXME: leaks data->sem — down_write() above has no matching up_write() on this error path */
+ }
+
+ if (alloc.align != SZ_4K &&
+ (pmem[id].allocator_type !=
+ PMEM_ALLOCATORTYPE_BITMAP)) {
+ pr_err("pmem: Non 4k alignment requires bitmap"
+ " allocator on %s\n", pmem[id].name);
+ return -EINVAL; /* FIXME: leaks data->sem — down_write() above has no matching up_write() on this error path */
+ }
+
+ if (alloc.align > SZ_1M ||
+ alloc.align < SZ_4K) {
+ pr_err("pmem: Invalid Alignment (%u) "
+ "specified\n", alloc.align);
+ return -EINVAL; /* FIXME: leaks data->sem — down_write() above has no matching up_write() on this error path */
+ }
+
+ mutex_lock(&pmem[id].arena_mutex);
+ data->index = pmem[id].allocate(id,
+ alloc.size,
+ alloc.align);
+ mutex_unlock(&pmem[id].arena_mutex);
+ ret = data->index == -1 ? -ENOMEM :
+ data->index;
+ up_write(&data->sem);
+ return ret;
}
case PMEM_CONNECT:
DLOG("connect\n");
return pmem_connect(arg, file);
- break;
- case PMEM_CACHE_FLUSH:
+ case PMEM_CLEAN_INV_CACHES:
+ case PMEM_CLEAN_CACHES:
+ case PMEM_INV_CACHES:
{
- struct pmem_region region;
- DLOG("flush\n");
- if (copy_from_user(®ion, (void __user *)arg,
- sizeof(struct pmem_region)))
+ struct pmem_addr pmem_addr;
+
+ if (copy_from_user(&pmem_addr, (void __user *)arg,
+ sizeof(struct pmem_addr)))
return -EFAULT;
- flush_pmem_file(file, region.offset, region.len);
- break;
+
+ return pmem_cache_maint(file, cmd, &pmem_addr);
}
default:
if (pmem[id].ioctl)
return pmem[id].ioctl(file, cmd, arg);
+
+ DLOG("ioctl invalid (%#x)\n", cmd);
return -EINVAL;
}
return 0;
}
-#if PMEM_DEBUG
-static ssize_t debug_open(struct inode *inode, struct file *file)
+static void ioremap_pmem(int id)
{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct list_head *elt, *elt2;
- struct pmem_data *data;
- struct pmem_region_node *region_node;
- int id = (int)file->private_data;
- const int debug_bufmax = 4096;
- static char buffer[4096];
- int n = 0;
-
- DLOG("debug open\n");
- n = scnprintf(buffer, debug_bufmax,
- "pid #: mapped regions (offset, len) (offset,len)...\n");
-
- mutex_lock(&pmem[id].data_list_lock);
- list_for_each(elt, &pmem[id].data_list) {
- data = list_entry(elt, struct pmem_data, list);
- down_read(&data->sem);
- n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
- data->pid);
- list_for_each(elt2, &data->region_list) {
- region_node = list_entry(elt2, struct pmem_region_node,
- list);
- n += scnprintf(buffer + n, debug_bufmax - n,
- "(%lx,%lx) ",
- region_node->region.offset,
- region_node->region.len);
- }
- n += scnprintf(buffer + n, debug_bufmax - n, "\n");
- up_read(&data->sem);
- }
- mutex_unlock(&pmem[id].data_list_lock);
-
- n++;
- buffer[n] = 0;
- return simple_read_from_buffer(buf, count, ppos, buffer, n);
-}
-
-static struct file_operations debug_fops = {
- .read = debug_read,
- .open = debug_open,
-};
+ if (pmem[id].cached)
+ pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
+#ifdef ioremap_ext_buffered
+ else if (pmem[id].buffered)
+ pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+ pmem[id].size);
#endif
-
-#if 0
-static struct miscdevice pmem_dev = {
- .name = "pmem",
- .fops = &pmem_fops,
-};
-#endif
+ else
+ pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+}
int pmem_setup(struct android_pmem_platform_data *pdata,
long (*ioctl)(struct file *, unsigned int, unsigned long),
int (*release)(struct inode *, struct file *))
{
- int err = 0;
- int i, index = 0;
- int id = id_count;
- id_count++;
+ int i, index = 0, id;
- pmem[id].no_allocator = pdata->no_allocator;
+ if (id_count >= PMEM_MAX_DEVICES) {
+ pr_alert("pmem: %s: unable to register driver(%s) - no more "
+ "devices available!\n", __func__, pdata->name);
+ goto err_no_mem;
+ }
+
+ if (!pdata->size) {
+ pr_alert("pmem: %s: unable to register pmem driver(%s) - zero "
+ "size passed in!\n", __func__, pdata->name);
+ goto err_no_mem;
+ }
+
+ id = id_count++;
+
+ pmem[id].id = id;
+
+ if (pmem[id].allocate) {
+ pr_alert("pmem: %s: unable to register pmem driver - "
+ "duplicate registration of %s!\n",
+ __func__, pdata->name);
+ goto err_no_mem;
+ }
+
+ pmem[id].allocator_type = pdata->allocator_type;
+
+ /* 'quantum' is a "hidden" variable that defaults to 0 in the board
+ * files */
+ pmem[id].quantum = pdata->quantum ?: PMEM_MIN_ALLOC;
+ if (pmem[id].quantum < PMEM_MIN_ALLOC ||
+ !is_power_of_2(pmem[id].quantum)) {
+ pr_alert("pmem: %s: unable to register pmem driver %s - "
+ "invalid quantum value (%#x)!\n",
+ __func__, pdata->name, pmem[id].quantum);
+ goto err_reset_pmem_info;
+ }
+
+ if (pdata->size % pmem[id].quantum) {
+ /* bad alignment for size! */
+ pr_alert("pmem: %s: Unable to register driver %s - "
+ "memory region size (%#lx) is not a multiple of "
+ "quantum size(%#x)!\n", __func__, pdata->name,
+ pdata->size, pmem[id].quantum);
+ goto err_reset_pmem_info;
+ }
+
pmem[id].cached = pdata->cached;
pmem[id].buffered = pdata->buffered;
- pmem[id].base = pdata->start;
pmem[id].size = pdata->size;
+ pmem[id].memory_type = pdata->memory_type;
+ strlcpy(pmem[id].name, pdata->name, PMEM_NAME_SIZE);
+
+ pmem[id].num_entries = pmem[id].size / pmem[id].quantum;
+
+ memset(&pmem[id].kobj, 0, sizeof(pmem[0].kobj));
+ pmem[id].kobj.kset = pmem_kset;
+
+ switch (pmem[id].allocator_type) {
+ case PMEM_ALLOCATORTYPE_ALLORNOTHING:
+ pmem[id].allocate = pmem_allocator_all_or_nothing;
+ pmem[id].free = pmem_free_all_or_nothing;
+ pmem[id].free_space = pmem_free_space_all_or_nothing;
+ pmem[id].len = pmem_len_all_or_nothing;
+ pmem[id].start_addr = pmem_start_addr_all_or_nothing;
+ pmem[id].num_entries = 1;
+ pmem[id].quantum = pmem[id].size;
+ pmem[id].allocator.all_or_nothing.allocated = 0;
+
+ if (kobject_init_and_add(&pmem[id].kobj,
+ &pmem_allornothing_ktype, NULL,
+ "%s", pdata->name))
+ goto out_put_kobj;
+
+ break;
+
+ case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
+ pmem[id].allocator.buddy_bestfit.buddy_bitmap = kmalloc(
+ pmem[id].num_entries * sizeof(struct pmem_bits),
+ GFP_KERNEL);
+ if (!pmem[id].allocator.buddy_bestfit.buddy_bitmap)
+ goto err_reset_pmem_info;
+
+ memset(pmem[id].allocator.buddy_bestfit.buddy_bitmap, 0,
+ sizeof(struct pmem_bits) * pmem[id].num_entries);
+
+ for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--)
+ if ((pmem[id].num_entries) & 1<<i) {
+ PMEM_BUDDY_ORDER(id, index) = i;
+ index = PMEM_BUDDY_NEXT_INDEX(id, index);
+ }
+ pmem[id].allocate = pmem_allocator_buddy_bestfit;
+ pmem[id].free = pmem_free_buddy_bestfit;
+ pmem[id].free_space = pmem_free_space_buddy_bestfit;
+ pmem[id].len = pmem_len_buddy_bestfit;
+ pmem[id].start_addr = pmem_start_addr_buddy_bestfit;
+ if (kobject_init_and_add(&pmem[id].kobj,
+ &pmem_buddy_bestfit_ktype, NULL,
+ "%s", pdata->name))
+ goto out_put_kobj;
+
+ break;
+
+ case PMEM_ALLOCATORTYPE_BITMAP: /* 0, default if not explicit */
+ pmem[id].allocator.bitmap.bitm_alloc = kmalloc(
+ PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS *
+ sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
+ GFP_KERNEL);
+ if (!pmem[id].allocator.bitmap.bitm_alloc) {
+ pr_alert("pmem: %s: Unable to register pmem "
+ "driver %s - can't allocate "
+ "bitm_alloc!\n",
+ __func__, pdata->name);
+ goto err_reset_pmem_info;
+ }
+
+ if (kobject_init_and_add(&pmem[id].kobj,
+ &pmem_bitmap_ktype, NULL,
+ "%s", pdata->name))
+ goto out_put_kobj;
+
+ for (i = 0; i < PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; i++) {
+ pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
+ pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
+ }
+
+ pmem[id].allocator.bitmap.bitmap_allocs =
+ PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS;
+
+ pmem[id].allocator.bitmap.bitmap =
+ kcalloc((pmem[id].num_entries + 31) / 32,
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!pmem[id].allocator.bitmap.bitmap) {
+ pr_alert("pmem: %s: Unable to register pmem "
+ "driver - can't allocate bitmap!\n",
+ __func__);
+ goto err_cant_register_device;
+ }
+ pmem[id].allocator.bitmap.bitmap_free = pmem[id].num_entries;
+
+ pmem[id].allocate = pmem_allocator_bitmap;
+ pmem[id].free = pmem_free_bitmap;
+ pmem[id].free_space = pmem_free_space_bitmap;
+ pmem[id].len = pmem_len_bitmap;
+ pmem[id].start_addr = pmem_start_addr_bitmap;
+
+ DLOG("bitmap allocator id %d (%s), num_entries %u, raw size "
+ "%lu, quanta size %u\n",
+ id, pdata->name, pmem[id].allocator.bitmap.bitmap_free,
+ pmem[id].size, pmem[id].quantum);
+ break;
+
+ case PMEM_ALLOCATORTYPE_SYSTEM:
+
+ INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist);
+
+ pmem[id].allocator.system_mem.used = 0;
+ pmem[id].vbase = NULL;
+
+ if (kobject_init_and_add(&pmem[id].kobj,
+ &pmem_system_ktype, NULL,
+ "%s", pdata->name))
+ goto out_put_kobj;
+
+ pmem[id].allocate = pmem_allocator_system;
+ pmem[id].free = pmem_free_system;
+ pmem[id].free_space = pmem_free_space_system;
+ pmem[id].len = pmem_len_system;
+ pmem[id].start_addr = pmem_start_addr_system;
+ pmem[id].num_entries = 0;
+ pmem[id].quantum = PAGE_SIZE;
+
+ DLOG("system allocator id %d (%s), raw size %lu\n",
+ id, pdata->name, pmem[id].size);
+ break;
+
+ default:
+ pr_alert("Invalid allocator type (%d) for pmem driver\n",
+ pdata->allocator_type);
+ goto err_reset_pmem_info;
+ }
+
pmem[id].ioctl = ioctl;
pmem[id].release = release;
- init_rwsem(&pmem[id].bitmap_sem);
- mutex_init(&pmem[id].data_list_lock);
+ mutex_init(&pmem[id].arena_mutex);
+ mutex_init(&pmem[id].data_list_mutex);
INIT_LIST_HEAD(&pmem[id].data_list);
+
pmem[id].dev.name = pdata->name;
pmem[id].dev.minor = id;
pmem[id].dev.fops = &pmem_fops;
- printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);
+ pr_info("pmem: Initializing %s (user-space) as %s\n",
+ pdata->name, pdata->cached ? "cached" : "non-cached");
- err = misc_register(&pmem[id].dev);
- if (err) {
- printk(KERN_ALERT "Unable to register pmem driver!\n");
+ if (misc_register(&pmem[id].dev)) {
+ pr_alert("Unable to register pmem driver!\n");
goto err_cant_register_device;
}
- pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;
- pmem[id].bitmap = kmalloc(pmem[id].num_entries *
- sizeof(struct pmem_bits), GFP_KERNEL);
- if (!pmem[id].bitmap)
- goto err_no_mem_for_metadata;
+ pmem[id].base = allocate_contiguous_memory_nomap(pmem[id].size,
+ pmem[id].memory_type, PAGE_SIZE);
- memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
- pmem[id].num_entries);
-
- for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
- if ((pmem[id].num_entries) & 1<<i) {
- PMEM_ORDER(id, index) = i;
- index = PMEM_NEXT_INDEX(id, index);
+ if (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM) {
+ ioremap_pmem(id);
+ if (pmem[id].vbase == 0) {
+ pr_err("pmem: ioremap failed for device %s\n",
+ pmem[id].name);
+ goto error_cant_remap;
}
}
- if (pmem[id].cached)
- pmem[id].vbase = ioremap_cached(pmem[id].base,
- pmem[id].size);
-#ifdef ioremap_ext_buffered
- else if (pmem[id].buffered)
- pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
- pmem[id].size);
-#endif
- else
- pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
-
- if (pmem[id].vbase == 0)
- goto error_cant_remap;
+ pr_info("allocating %lu bytes at %p (%lx physical) for %s\n",
+ pmem[id].size, pmem[id].vbase, pmem[id].base, pmem[id].name);
pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
- if (pmem[id].no_allocator)
- pmem[id].allocated = 0;
-#if PMEM_DEBUG
- debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
- &debug_fops);
-#endif
return 0;
+
error_cant_remap:
- kfree(pmem[id].bitmap);
-err_no_mem_for_metadata:
misc_deregister(&pmem[id].dev);
err_cant_register_device:
+out_put_kobj:
+ kobject_put(&pmem[id].kobj);
+ if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT)
+ kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap);
+ else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) {
+ kfree(pmem[id].allocator.bitmap.bitmap);
+ kfree(pmem[id].allocator.bitmap.bitm_alloc);
+ }
+err_reset_pmem_info:
+ pmem[id].allocate = 0;
+ pmem[id].dev.minor = -1;
+err_no_mem:
return -1;
}
@@ -1307,31 +2692,62 @@
struct android_pmem_platform_data *pdata;
if (!pdev || !pdev->dev.platform_data) {
- printk(KERN_ALERT "Unable to probe pmem!\n");
+ pr_alert("Unable to probe pmem!\n");
return -1;
}
pdata = pdev->dev.platform_data;
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return pmem_setup(pdata, NULL, NULL);
}
-
static int pmem_remove(struct platform_device *pdev)
{
int id = pdev->id;
__free_page(pfn_to_page(pmem[id].garbage_pfn));
+ pm_runtime_disable(&pdev->dev);
misc_deregister(&pmem[id].dev);
return 0;
}
+static int pmem_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int pmem_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops pmem_dev_pm_ops = {
+ .runtime_suspend = pmem_runtime_suspend,
+ .runtime_resume = pmem_runtime_resume,
+};
+
static struct platform_driver pmem_driver = {
.probe = pmem_probe,
.remove = pmem_remove,
- .driver = { .name = "android_pmem" }
+ .driver = { .name = "android_pmem",
+ .pm = &pmem_dev_pm_ops,
+ }
};
static int __init pmem_init(void)
{
+ /* create /sys/kernel/<PMEM_SYSFS_DIR_NAME> directory */
+ pmem_kset = kset_create_and_add(PMEM_SYSFS_DIR_NAME,
+ NULL, kernel_kobj);
+ if (!pmem_kset) {
+ pr_err("pmem(%s):kset_create_and_add fail\n", __func__);
+ return -ENOMEM;
+ }
+
return platform_driver_register(&pmem_driver);
}
diff --git a/drivers/misc/pmic8058-batt-alarm.c b/drivers/misc/pmic8058-batt-alarm.c
new file mode 100644
index 0000000..bff0720
--- /dev/null
+++ b/drivers/misc/pmic8058-batt-alarm.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC 8058 Battery Alarm Device driver
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pmic8058-batt-alarm.h>
+#include <linux/mfd/pmic8058.h>
+
+/* PMIC 8058 Battery Alarm SSBI registers */
+#define REG_THRESHOLD 0x023
+#define REG_CTRL1 0x024
+#define REG_CTRL2 0x0AA
+#define REG_PWM_CTRL 0x0A3
+
+/* Available voltage threshold values */
+#define THRESHOLD_MIN_MV 2500
+#define THRESHOLD_MAX_MV 5675
+#define THRESHOLD_STEP_MV 25
+
+/* Register bit definitions */
+
+/* Threshold register */
+#define THRESHOLD_UPPER_MASK 0xF0
+#define THRESHOLD_LOWER_MASK 0x0F
+#define THRESHOLD_UPPER_SHIFT 4
+#define THRESHOLD_LOWER_SHIFT 0
+
+/* CTRL 1 register */
+#define CTRL1_BATT_ALARM_EN_MASK 0x80
+#define CTRL1_HOLD_TIME_MASK 0x70
+#define CTRL1_STATUS_UPPER_MASK 0x02
+#define CTRL1_STATUS_LOWER_MASK 0x01
+#define CTRL1_HOLD_TIME_SHIFT 4
+#define CTRL1_HOLD_TIME_MIN 0
+#define CTRL1_HOLD_TIME_MAX 7
+
+/* CTRL 2 register */
+#define CTRL2_COMP_UPPER_DISABLE_MASK 0x80
+#define CTRL2_COMP_LOWER_DISABLE_MASK 0x40
+#define CTRL2_FINE_STEP_UPPER_MASK 0x30
+#define CTRL2_RANGE_EXT_UPPER_MASK 0x08
+#define CTRL2_FINE_STEP_LOWER_MASK 0x06
+#define CTRL2_RANGE_EXT_LOWER_MASK 0x01
+#define CTRL2_FINE_STEP_UPPER_SHIFT 4
+#define CTRL2_FINE_STEP_LOWER_SHIFT 1
+
+/* PWM control register */
+#define PWM_CTRL_ALARM_EN_MASK 0xC0
+#define PWM_CTRL_ALARM_EN_NEVER 0x00
+#define PWM_CTRL_ALARM_EN_TCXO 0x40
+#define PWM_CTRL_ALARM_EN_PWM 0x80
+#define PWM_CTRL_ALARM_EN_ALWAYS 0xC0
+#define PWM_CTRL_PRE_MASK 0x38
+#define PWM_CTRL_DIV_MASK 0x07
+#define PWM_CTRL_PRE_SHIFT 3
+#define PWM_CTRL_DIV_SHIFT 0
+#define PWM_CTRL_PRE_MIN 0
+#define PWM_CTRL_PRE_MAX 7
+#define PWM_CTRL_DIV_MIN 1
+#define PWM_CTRL_DIV_MAX 7
+
+/* PWM control input range */
+#define PWM_CTRL_PRE_INPUT_MIN 2
+#define PWM_CTRL_PRE_INPUT_MAX 9
+#define PWM_CTRL_DIV_INPUT_MIN 2
+#define PWM_CTRL_DIV_INPUT_MAX 8
+
+/* Available voltage threshold values */
+#define THRESHOLD_BASIC_MIN_MV 2800
+#define THRESHOLD_EXT_MIN_MV 4400
+
+/*
+ * Default values used during initialization:
+ * Slowest PWM rate to ensure minimal status jittering when crossing thresholds.
+ * Largest hold time also helps reduce status value jittering. Comparators
+ * are disabled by default and must be turned on by calling
+ * pm8058_batt_alarm_state_set.
+ */
+#define DEFAULT_THRESHOLD_LOWER 3200
+#define DEFAULT_THRESHOLD_UPPER 4300
+#define DEFAULT_HOLD_TIME PM8058_BATT_ALARM_HOLD_TIME_16_MS
+#define DEFAULT_USE_PWM 1
+#define DEFAULT_PWM_SCALER 9
+#define DEFAULT_PWM_DIVIDER 8
+#define DEFAULT_LOWER_ENABLE 0
+#define DEFAULT_UPPER_ENABLE 0
+
+struct pm8058_batt_alarm_device {
+ struct srcu_notifier_head irq_notifier_list;
+ struct pm8058_chip *pm_chip;
+ struct mutex batt_mutex;
+ unsigned int irq;
+ int notifier_count;
+ u8 reg_threshold;
+ u8 reg_ctrl1;
+ u8 reg_ctrl2;
+ u8 reg_pwm_ctrl;
+};
+static struct pm8058_batt_alarm_device *the_battalarm;
+
+static int pm8058_reg_write(struct pm8058_chip *chip, u16 addr, u8 val, u8 mask,
+ u8 *reg_save)
+{
+ int rc = 0;
+ u8 reg;
+
+ reg = (*reg_save & ~mask) | (val & mask);
+ if (reg != *reg_save)
+ rc = pm8058_write(chip, addr, ®, 1);
+ if (rc)
+ pr_err("pm8058_write failed; addr=%03X, rc=%d\n", addr, rc);
+ else
+ *reg_save = reg;
+ return rc;
+}
+
+/**
+ * pm8058_batt_alarm_state_set - enable or disable the threshold comparators
+ * @enable_lower_comparator: 1 = enable comparator, 0 = disable comparator
+ * @enable_upper_comparator: 1 = enable comparator, 0 = disable comparator
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_state_set(int enable_lower_comparator,
+ int enable_upper_comparator)
+{
+ struct pm8058_batt_alarm_device *battdev = the_battalarm;
+ int rc;
+ u8 reg_ctrl1 = 0, reg_ctrl2 = 0;
+
+ if (!battdev) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ if (!enable_lower_comparator)
+ reg_ctrl2 |= CTRL2_COMP_LOWER_DISABLE_MASK;
+ if (!enable_upper_comparator)
+ reg_ctrl2 |= CTRL2_COMP_UPPER_DISABLE_MASK;
+
+ if (enable_lower_comparator || enable_upper_comparator)
+ reg_ctrl1 = CTRL1_BATT_ALARM_EN_MASK;
+
+ mutex_lock(&battdev->batt_mutex);
+ rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL1, reg_ctrl1,
+ CTRL1_BATT_ALARM_EN_MASK, &battdev->reg_ctrl1);
+ if (rc)
+ goto bail;
+
+ rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL2, reg_ctrl2,
+ CTRL2_COMP_LOWER_DISABLE_MASK | CTRL2_COMP_UPPER_DISABLE_MASK,
+ &battdev->reg_ctrl2);
+
+bail:
+ mutex_unlock(&battdev->batt_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_state_set);
+
+/**
+ * pm8058_batt_alarm_threshold_set - set the lower and upper alarm thresholds
+ * @lower_threshold_mV: battery undervoltage threshold in millivolts
+ * @upper_threshold_mV: battery overvoltage threshold in millivolts
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_threshold_set(int lower_threshold_mV,
+ int upper_threshold_mV)
+{
+ struct pm8058_batt_alarm_device *battdev = the_battalarm;
+ int step, fine_step, rc;
+ u8 reg_threshold = 0, reg_ctrl2 = 0;
+
+ if (!battdev) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ if (lower_threshold_mV < THRESHOLD_MIN_MV
+ || lower_threshold_mV > THRESHOLD_MAX_MV) {
+ pr_err("lower threshold value, %d mV, is outside of allowable "
+ "range: [%d, %d] mV\n", lower_threshold_mV,
+ THRESHOLD_MIN_MV, THRESHOLD_MAX_MV);
+ return -EINVAL;
+ }
+
+ if (upper_threshold_mV < THRESHOLD_MIN_MV
+ || upper_threshold_mV > THRESHOLD_MAX_MV) {
+ pr_err("upper threshold value, %d mV, is outside of allowable "
+ "range: [%d, %d] mV\n", upper_threshold_mV,
+ THRESHOLD_MIN_MV, THRESHOLD_MAX_MV);
+ return -EINVAL;
+ }
+
+ if (upper_threshold_mV < lower_threshold_mV) {
+ pr_err("lower threshold value, %d mV, must be <= upper "
+ "threshold value, %d mV\n", lower_threshold_mV,
+ upper_threshold_mV);
+ return -EINVAL;
+ }
+
+ /* Determine register settings for lower threshold. */
+ if (lower_threshold_mV < THRESHOLD_BASIC_MIN_MV) {
+ /* Extended low range */
+ reg_ctrl2 |= CTRL2_RANGE_EXT_LOWER_MASK;
+
+ step = (lower_threshold_mV - THRESHOLD_MIN_MV)
+ / THRESHOLD_STEP_MV;
+
+ fine_step = step & 0x3;
+ /* Extended low range is for steps 0 to 2 */
+ step >>= 2;
+
+ reg_threshold |= (step << THRESHOLD_LOWER_SHIFT)
+ & THRESHOLD_LOWER_MASK;
+ reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_LOWER_SHIFT)
+ & CTRL2_FINE_STEP_LOWER_MASK;
+ } else if (lower_threshold_mV >= THRESHOLD_EXT_MIN_MV) {
+ /* Extended high range */
+ reg_ctrl2 |= CTRL2_RANGE_EXT_LOWER_MASK;
+
+ step = (lower_threshold_mV - THRESHOLD_EXT_MIN_MV)
+ / THRESHOLD_STEP_MV;
+
+ fine_step = step & 0x3;
+ /* Extended high range is for steps 3 to 15 */
+ step = (step >> 2) + 3;
+
+ reg_threshold |= (step << THRESHOLD_LOWER_SHIFT)
+ & THRESHOLD_LOWER_MASK;
+ reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_LOWER_SHIFT)
+ & CTRL2_FINE_STEP_LOWER_MASK;
+ } else {
+ /* Basic range */
+ step = (lower_threshold_mV - THRESHOLD_BASIC_MIN_MV)
+ / THRESHOLD_STEP_MV;
+
+ fine_step = step & 0x3;
+ step >>= 2;
+
+ reg_threshold |= (step << THRESHOLD_LOWER_SHIFT)
+ & THRESHOLD_LOWER_MASK;
+ reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_LOWER_SHIFT)
+ & CTRL2_FINE_STEP_LOWER_MASK;
+ }
+
+ /* Determine register settings for upper threshold. */
+ if (upper_threshold_mV < THRESHOLD_BASIC_MIN_MV) {
+ /* Extended low range */
+ reg_ctrl2 |= CTRL2_RANGE_EXT_UPPER_MASK;
+
+ step = (upper_threshold_mV - THRESHOLD_MIN_MV)
+ / THRESHOLD_STEP_MV;
+
+ fine_step = step & 0x3;
+ /* Extended low range is for steps 0 to 2 */
+ step >>= 2;
+
+ reg_threshold |= (step << THRESHOLD_UPPER_SHIFT)
+ & THRESHOLD_UPPER_MASK;
+ reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_UPPER_SHIFT)
+ & CTRL2_FINE_STEP_UPPER_MASK;
+ } else if (upper_threshold_mV >= THRESHOLD_EXT_MIN_MV) {
+ /* Extended high range */
+ reg_ctrl2 |= CTRL2_RANGE_EXT_UPPER_MASK;
+
+ step = (upper_threshold_mV - THRESHOLD_EXT_MIN_MV)
+ / THRESHOLD_STEP_MV;
+
+ fine_step = step & 0x3;
+ /* Extended high range is for steps 3 to 15 */
+ step = (step >> 2) + 3;
+
+ reg_threshold |= (step << THRESHOLD_UPPER_SHIFT)
+ & THRESHOLD_UPPER_MASK;
+ reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_UPPER_SHIFT)
+ & CTRL2_FINE_STEP_UPPER_MASK;
+ } else {
+ /* Basic range */
+ step = (upper_threshold_mV - THRESHOLD_BASIC_MIN_MV)
+ / THRESHOLD_STEP_MV;
+
+ fine_step = step & 0x3;
+ step >>= 2;
+
+ reg_threshold |= (step << THRESHOLD_UPPER_SHIFT)
+ & THRESHOLD_UPPER_MASK;
+ reg_ctrl2 |= (fine_step << CTRL2_FINE_STEP_UPPER_SHIFT)
+ & CTRL2_FINE_STEP_UPPER_MASK;
+ }
+
+ mutex_lock(&battdev->batt_mutex);
+ rc = pm8058_reg_write(battdev->pm_chip, REG_THRESHOLD, reg_threshold,
+ THRESHOLD_LOWER_MASK | THRESHOLD_UPPER_MASK,
+ &battdev->reg_threshold);
+ if (rc)
+ goto bail;
+
+ rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL2, reg_ctrl2,
+ CTRL2_FINE_STEP_LOWER_MASK | CTRL2_FINE_STEP_UPPER_MASK
+ | CTRL2_RANGE_EXT_LOWER_MASK | CTRL2_RANGE_EXT_UPPER_MASK,
+ &battdev->reg_ctrl2);
+
+bail:
+ mutex_unlock(&battdev->batt_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_threshold_set);
+
+/**
+ * pm8058_batt_alarm_status_read - get status of both threshold comparators
+ *
+ * RETURNS: < 0 = error
+ * 0 = battery voltage ok
+ * BIT(0) set = battery voltage below lower threshold
+ * BIT(1) set = battery voltage above upper threshold
+ */
+int pm8058_batt_alarm_status_read(void)
+{
+ struct pm8058_batt_alarm_device *battdev = the_battalarm;
+ int status, rc;
+
+ if (!battdev) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ mutex_lock(&battdev->batt_mutex);
+ rc = pm8058_read(battdev->pm_chip, REG_CTRL1, &battdev->reg_ctrl1, 1);
+
+ status = ((battdev->reg_ctrl1 & CTRL1_STATUS_LOWER_MASK)
+ ? PM8058_BATT_ALARM_STATUS_BELOW_LOWER : 0)
+ | ((battdev->reg_ctrl1 & CTRL1_STATUS_UPPER_MASK)
+ ? PM8058_BATT_ALARM_STATUS_ABOVE_UPPER : 0);
+ mutex_unlock(&battdev->batt_mutex);
+
+ if (rc) {
+ pr_err("pm8058_read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_status_read);
+
+/**
+ * pm8058_batt_alarm_hold_time_set - set hold time of interrupt output *
+ * @hold_time: amount of time that battery voltage must remain outside of the
+ * threshold range before the battery alarm interrupt triggers
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_hold_time_set(enum pm8058_batt_alarm_hold_time hold_time)
+{
+ struct pm8058_batt_alarm_device *battdev = the_battalarm;
+ int rc;
+ u8 reg_ctrl1 = 0;
+
+ if (!battdev) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ if (hold_time < CTRL1_HOLD_TIME_MIN
+ || hold_time > CTRL1_HOLD_TIME_MAX) {
+
+ pr_err("hold time, %d, is outside of allowable range: "
+ "[%d, %d]\n", hold_time, CTRL1_HOLD_TIME_MIN,
+ CTRL1_HOLD_TIME_MAX);
+ return -EINVAL;
+ }
+
+ reg_ctrl1 = hold_time << CTRL1_HOLD_TIME_SHIFT;
+
+ mutex_lock(&battdev->batt_mutex);
+ rc = pm8058_reg_write(battdev->pm_chip, REG_CTRL1, reg_ctrl1,
+ CTRL1_HOLD_TIME_MASK, &battdev->reg_ctrl1);
+ mutex_unlock(&battdev->batt_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_hold_time_set);
+
+/**
+ * pm8058_batt_alarm_pwm_rate_set - set battery alarm update rate *
+ * @use_pwm: 1 = use PWM update rate, 0 = comparators always active
+ * @clock_scaler: PWM clock scaler = 2 to 9
+ * @clock_divider: PWM clock divider = 2 to 8
+ *
+ * This function sets the rate at which the battery alarm module enables
+ * the threshold comparators. The rate is determined by the following equation:
+ *
+ * f_update = (1024 Hz) / (clock_divider * (2 ^ clock_scaler))
+ *
+ * Thus, the update rate can range from 0.25 Hz to 128 Hz.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_pwm_rate_set(int use_pwm, int clock_scaler,
+ int clock_divider)
+{
+ struct pm8058_batt_alarm_device *battdev = the_battalarm;
+ int rc;
+ u8 reg_pwm_ctrl = 0, mask = 0;
+
+ if (!battdev) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ if (use_pwm && (clock_scaler < PWM_CTRL_PRE_INPUT_MIN
+ || clock_scaler > PWM_CTRL_PRE_INPUT_MAX)) {
+ pr_err("PWM clock scaler, %d, is outside of allowable range: "
+ "[%d, %d]\n", clock_scaler, PWM_CTRL_PRE_INPUT_MIN,
+ PWM_CTRL_PRE_INPUT_MAX);
+ return -EINVAL;
+ }
+
+ if (use_pwm && (clock_divider < PWM_CTRL_DIV_INPUT_MIN
+ || clock_divider > PWM_CTRL_DIV_INPUT_MAX)) {
+ pr_err("PWM clock divider, %d, is outside of allowable range: "
+ "[%d, %d]\n", clock_divider, PWM_CTRL_DIV_INPUT_MIN,
+ PWM_CTRL_DIV_INPUT_MAX);
+ return -EINVAL;
+ }
+
+ if (!use_pwm) {
+ /* Turn off PWM control and always enable. */
+ reg_pwm_ctrl = PWM_CTRL_ALARM_EN_ALWAYS;
+ mask = PWM_CTRL_ALARM_EN_MASK;
+ } else {
+ /* Use PWM control. */
+ reg_pwm_ctrl = PWM_CTRL_ALARM_EN_PWM;
+ mask = PWM_CTRL_ALARM_EN_MASK | PWM_CTRL_PRE_MASK
+ | PWM_CTRL_DIV_MASK;
+
+ clock_scaler -= PWM_CTRL_PRE_INPUT_MIN - PWM_CTRL_PRE_MIN;
+ clock_divider -= PWM_CTRL_DIV_INPUT_MIN - PWM_CTRL_DIV_MIN;
+
+ reg_pwm_ctrl |= (clock_scaler << PWM_CTRL_PRE_SHIFT)
+ & PWM_CTRL_PRE_MASK;
+ reg_pwm_ctrl |= (clock_divider << PWM_CTRL_DIV_SHIFT)
+ & PWM_CTRL_DIV_MASK;
+ }
+
+ mutex_lock(&battdev->batt_mutex);
+ rc = pm8058_reg_write(battdev->pm_chip, REG_PWM_CTRL, reg_pwm_ctrl,
+ mask, &battdev->reg_pwm_ctrl);
+ mutex_unlock(&battdev->batt_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_pwm_rate_set);
+
+/*
+ * Handle the BATT_ALARM interrupt:
+ * Battery voltage is above or below threshold range.
+ */
+static irqreturn_t pm8058_batt_alarm_isr(int irq, void *data)
+{
+ struct pm8058_batt_alarm_device *battdev = data;
+ int status;
+
+ if (battdev) {
+ status = pm8058_batt_alarm_status_read();
+
+ if (status < 0)
+ pr_err("failed to read status, rc=%d\n", status);
+ else
+ srcu_notifier_call_chain(&battdev->irq_notifier_list,
+ status, NULL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pm8058_batt_alarm_register_notifier - register a notifier to run when a
+ * battery voltage change interrupt fires
+ * @nb: notifier block containing callback function to register
+ *
+ * nb->notifier_call must point to a function of this form -
+ * int (*notifier_call)(struct notifier_block *nb, unsigned long status,
+ * void *unused);
+ * "status" will receive the battery alarm status; "unused" will be NULL.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_register_notifier(struct notifier_block *nb)
+{
+ int rc;
+
+ if (!the_battalarm) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ rc = srcu_notifier_chain_register(&the_battalarm->irq_notifier_list,
+ nb);
+ mutex_lock(&the_battalarm->batt_mutex);
+ if (rc == 0) {
+ if (the_battalarm->notifier_count == 0) {
+ /* request the irq */
+ rc = request_threaded_irq(the_battalarm->irq, NULL,
+ pm8058_batt_alarm_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "pm8058-batt_alarm-irq", the_battalarm);
+ if (rc < 0) {
+ pr_err("request_irq(%d) failed, rc=%d\n",
+ the_battalarm->irq, rc);
+ goto done;
+ }
+
+ rc = irq_set_irq_wake(the_battalarm->irq, 1);
+ if (rc < 0) {
+ pr_err("irq_set_irq_wake(%d,1) failed, rc=%d\n",
+ the_battalarm->irq, rc);
+ goto done;
+ }
+ }
+
+ the_battalarm->notifier_count++;
+ }
+done:
+ mutex_unlock(&the_battalarm->batt_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_register_notifier);
+
+/**
+ * pm8058_batt_alarm_unregister_notifier - unregister a notifier that is run
+ * when a battery voltage change interrupt fires
+ * @nb: notifier block containing callback function to unregister
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_batt_alarm_unregister_notifier(struct notifier_block *nb)
+{
+ int rc;
+
+ if (!the_battalarm) {
+ pr_err("no battery alarm device found.\n");
+ return -ENXIO;
+ }
+
+ rc = srcu_notifier_chain_unregister(&the_battalarm->irq_notifier_list,
+ nb);
+ if (rc == 0) {
+ mutex_lock(&the_battalarm->batt_mutex);
+
+ the_battalarm->notifier_count--;
+
+ if (the_battalarm->notifier_count == 0)
+ free_irq(the_battalarm->irq, the_battalarm);
+
+ WARN_ON(the_battalarm->notifier_count < 0);
+
+ mutex_unlock(&the_battalarm->batt_mutex);
+ }
+
+
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pm8058_batt_alarm_unregister_notifier);
+
+static int pm8058_batt_alarm_reg_init(struct pm8058_batt_alarm_device *battdev)
+{
+ int rc = 0;
+
+ /* save the current register states */
+ rc = pm8058_read(battdev->pm_chip, REG_THRESHOLD,
+ &battdev->reg_threshold, 1);
+ if (rc)
+ goto bail;
+
+ rc = pm8058_read(battdev->pm_chip, REG_CTRL1,
+ &battdev->reg_ctrl1, 1);
+ if (rc)
+ goto bail;
+
+ rc = pm8058_read(battdev->pm_chip, REG_CTRL2,
+ &battdev->reg_ctrl2, 1);
+ if (rc)
+ goto bail;
+
+ rc = pm8058_read(battdev->pm_chip, REG_PWM_CTRL,
+ &battdev->reg_pwm_ctrl, 1);
+ if (rc)
+ goto bail;
+
+bail:
+ if (rc)
+ pr_err("pm8058_read failed; initial register states "
+ "unknown, rc=%d\n", rc);
+ return rc;
+}
+
+static int pm8058_batt_alarm_config(void)
+{
+ int rc = 0;
+
+ /* Use default values when no platform data is provided. */
+ rc = pm8058_batt_alarm_threshold_set(DEFAULT_THRESHOLD_LOWER,
+ DEFAULT_THRESHOLD_UPPER);
+ if (rc) {
+ pr_err("threshold_set failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = pm8058_batt_alarm_hold_time_set(DEFAULT_HOLD_TIME);
+ if (rc) {
+ pr_err("hold_time_set failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = pm8058_batt_alarm_pwm_rate_set(DEFAULT_USE_PWM,
+ DEFAULT_PWM_SCALER, DEFAULT_PWM_DIVIDER);
+ if (rc) {
+ pr_err("pwm_rate_set failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = pm8058_batt_alarm_state_set(DEFAULT_LOWER_ENABLE,
+ DEFAULT_UPPER_ENABLE);
+ if (rc) {
+ pr_err("state_set failed, rc=%d\n", rc);
+ goto done;
+ }
+
+done:
+ return rc;
+}
+
+static int __devinit pm8058_batt_alarm_probe(struct platform_device *pdev)
+{
+ struct pm8058_batt_alarm_device *battdev;
+ struct pm8058_chip *pm_chip;
+ unsigned int irq;
+ int rc;
+
+ pm_chip = dev_get_drvdata(pdev->dev.parent);
+ if (pm_chip == NULL) {
+ pr_err("no driver data passed in.\n");
+ rc = -EFAULT;
+ goto exit_input;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ pr_err("no IRQ passed in.\n");
+ rc = -EFAULT;
+ goto exit_input;
+ }
+
+ battdev = kzalloc(sizeof *battdev, GFP_KERNEL);
+ if (battdev == NULL) {
+ pr_err("kzalloc() failed.\n");
+ rc = -ENOMEM;
+ goto exit_input;
+ }
+
+ battdev->pm_chip = pm_chip;
+ platform_set_drvdata(pdev, battdev);
+
+ srcu_init_notifier_head(&battdev->irq_notifier_list);
+
+ battdev->irq = irq;
+ battdev->notifier_count = 0;
+ mutex_init(&battdev->batt_mutex);
+
+ rc = pm8058_batt_alarm_reg_init(battdev);
+ if (rc)
+ goto exit_free_dev;
+
+ the_battalarm = battdev;
+
+ rc = pm8058_batt_alarm_config();
+ if (rc)
+ goto exit_free_dev;
+
+ pr_notice("OK\n");
+ return 0;
+
+exit_free_dev:
+ mutex_destroy(&battdev->batt_mutex);
+ srcu_cleanup_notifier_head(&battdev->irq_notifier_list);
+ platform_set_drvdata(pdev, battdev->pm_chip);
+ kfree(battdev);
+exit_input:
+ return rc;
+}
+
+static int __devexit pm8058_batt_alarm_remove(struct platform_device *pdev)
+{
+ struct pm8058_batt_alarm_device *battdev = platform_get_drvdata(pdev);
+
+ mutex_destroy(&battdev->batt_mutex);
+ srcu_cleanup_notifier_head(&battdev->irq_notifier_list);
+ platform_set_drvdata(pdev, battdev->pm_chip);
+ free_irq(battdev->irq, battdev);
+ kfree(battdev);
+
+ the_battalarm = NULL;
+
+ return 0;
+}
+
+static struct platform_driver pm8058_batt_alarm_driver = {
+ .probe = pm8058_batt_alarm_probe,
+ .remove = __devexit_p(pm8058_batt_alarm_remove),
+ .driver = {
+ .name = "pm8058-batt-alarm",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pm8058_batt_alarm_init(void)
+{
+ return platform_driver_register(&pm8058_batt_alarm_driver);
+}
+
+static void __exit pm8058_batt_alarm_exit(void)
+{
+ platform_driver_unregister(&pm8058_batt_alarm_driver);
+}
+
+module_init(pm8058_batt_alarm_init);
+module_exit(pm8058_batt_alarm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 Battery Alarm Device driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8058-batt-alarm");
diff --git a/drivers/misc/pmic8058-misc.c b/drivers/misc/pmic8058-misc.c
new file mode 100644
index 0000000..77a2f47
--- /dev/null
+++ b/drivers/misc/pmic8058-misc.c
@@ -0,0 +1,335 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 Misc Device driver
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-misc.h>
+
+/* VIB_DRV register */
+#define SSBI_REG_ADDR_DRV_VIB 0x4A
+
+#define PM8058_VIB_DRIVE_SHIFT 3
+#define PM8058_VIB_LOGIC_SHIFT 2
+#define PM8058_VIB_MIN_LEVEL_mV 1200
+#define PM8058_VIB_MAX_LEVEL_mV 3100
+
+/* COINCELL_CHG register */
+#define SSBI_REG_ADDR_COINCELL_CHG (0x2F)
+#define PM8058_COINCELL_RESISTOR_SHIFT (2)
+
+/* Resource offsets. */
+enum PM8058_MISC_IRQ {
+ PM8058_MISC_IRQ_OSC_HALT = 0
+};
+
+struct pm8058_misc_device {
+ struct pm8058_chip *pm_chip;
+ struct dentry *dgb_dir;
+ unsigned int osc_halt_irq;
+ u64 osc_halt_count;
+};
+
+static struct pm8058_misc_device *misc_dev;
+
+int pm8058_vibrator_config(struct pm8058_vib_config *vib_config)
+{
+ u8 reg = 0;
+ int rc;
+
+ if (misc_dev == NULL) {
+ pr_info("misc_device is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vib_config->drive_mV) {
+ if (vib_config->drive_mV < PM8058_VIB_MIN_LEVEL_mV ||
+ vib_config->drive_mV > PM8058_VIB_MAX_LEVEL_mV) {
+ pr_err("Invalid vibrator drive strength\n");
+ return -EINVAL;
+ }
+ }
+
+ reg = (vib_config->drive_mV / 100) << PM8058_VIB_DRIVE_SHIFT;
+
+ reg |= (!!vib_config->active_low) << PM8058_VIB_LOGIC_SHIFT;
+
+ reg |= vib_config->enable_mode;
+
+ rc = pm8058_write(misc_dev->pm_chip, SSBI_REG_ADDR_DRV_VIB, ®, 1);
+ if (rc)
+ pr_err("%s: pm8058 write failed: rc=%d\n", __func__, rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(pm8058_vibrator_config);
+
+/**
+ * pm8058_coincell_chg_config - Disables or enables the coincell charger, and
+ * configures its voltage and resistor settings.
+ * @chg_config: Holds both voltage and resistor values, and a
+ * switch to change the state of charger.
+ * If state is to disable the charger then
+ * both voltage and resistor are disregarded.
+ *
+ * RETURNS: an appropriate -ERRNO error value on error, or zero for success.
+ */
+int pm8058_coincell_chg_config(struct pm8058_coincell_chg_config *chg_config)
+{
+ u8 reg, voltage, resistor;
+ int rc;
+
+ reg = 0;
+ voltage = 0;
+ resistor = 0;
+ rc = 0;
+
+ if (misc_dev == NULL) {
+ pr_err("misc_device is NULL\n");
+ return -EINVAL;
+ }
+
+ if (chg_config == NULL) {
+ pr_err("chg_config is NULL\n");
+ return -EINVAL;
+ }
+
+ if (chg_config->state == PM8058_COINCELL_CHG_DISABLE) {
+ rc = pm8058_write(misc_dev->pm_chip,
+ SSBI_REG_ADDR_COINCELL_CHG, ®, 1);
+ if (rc)
+ pr_err("%s: pm8058 write failed: rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ voltage = chg_config->voltage;
+ resistor = chg_config->resistor;
+
+ if (voltage < PM8058_COINCELL_VOLTAGE_3p2V ||
+ (voltage > PM8058_COINCELL_VOLTAGE_3p0V &&
+ voltage != PM8058_COINCELL_VOLTAGE_2p5V)) {
+ pr_err("Invalid voltage value provided\n");
+ return -EINVAL;
+ }
+
+ if (resistor < PM8058_COINCELL_RESISTOR_2100_OHMS ||
+ resistor > PM8058_COINCELL_RESISTOR_800_OHMS) {
+ pr_err("Invalid resistor value provided\n");
+ return -EINVAL;
+ }
+
+ reg |= voltage;
+
+ reg |= (resistor << PM8058_COINCELL_RESISTOR_SHIFT);
+
+ rc = pm8058_write(misc_dev->pm_chip,
+ SSBI_REG_ADDR_COINCELL_CHG, ®, 1);
+
+ if (rc)
+ pr_err("%s: pm8058 write failed: rc=%d\n", __func__, rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(pm8058_coincell_chg_config);
+
+/* Handle the OSC_HALT interrupt: 32 kHz XTAL oscillator has stopped. */
+static irqreturn_t pm8058_osc_halt_isr(int irq, void *data)
+{
+ struct pm8058_misc_device *miscdev = data;
+ u64 count = 0;
+
+ if (miscdev) {
+ miscdev->osc_halt_count++;
+ count = miscdev->osc_halt_count;
+ }
+
+ pr_crit("%s: OSC_HALT interrupt has triggered, 32 kHz XTAL oscillator"
+ " has halted (%llu)!\n", __func__, count);
+
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int osc_halt_count_get(void *data, u64 *val)
+{
+ struct pm8058_misc_device *miscdev = data;
+
+ if (miscdev == NULL) {
+ pr_err("%s: null pointer input.\n", __func__);
+ return -EINVAL;
+ }
+
+ *val = miscdev->osc_halt_count;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dbg_osc_halt_fops, osc_halt_count_get, NULL, "%llu\n");
+
+static int __devinit pmic8058_misc_dbg_probe(struct pm8058_misc_device *miscdev)
+{
+ struct dentry *dent;
+ struct dentry *temp;
+
+ if (miscdev == NULL) {
+ pr_err("%s: no parent data passed in.\n", __func__);
+ return -EINVAL;
+ }
+
+ dent = debugfs_create_dir("pm8058-misc", NULL);
+ if (dent == NULL || IS_ERR(dent)) {
+ pr_err("%s: ERR debugfs_create_dir: dent=0x%X\n",
+ __func__, (unsigned)dent);
+ return -ENOMEM;
+ }
+
+ temp = debugfs_create_file("osc_halt_count", S_IRUSR, dent,
+ miscdev, &dbg_osc_halt_fops);
+ if (temp == NULL || IS_ERR(temp)) {
+ pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+ __func__, (unsigned)temp);
+ goto debug_error;
+ }
+
+ miscdev->dgb_dir = dent;
+ return 0;
+
+debug_error:
+ debugfs_remove_recursive(dent);
+ return -ENOMEM;
+}
+
+static int __devexit pmic8058_misc_dbg_remove(
+ struct pm8058_misc_device *miscdev)
+{
+ if (miscdev->dgb_dir)
+ debugfs_remove_recursive(miscdev->dgb_dir);
+
+ return 0;
+}
+
+#else
+
+static int __devinit pmic8058_misc_dbg_probe(struct pm8058_misc_device *miscdev)
+{
+ return 0;
+}
+
+static int __devexit pmic8058_misc_dbg_remove(
+ struct pm8058_misc_device *miscdev)
+{
+ return 0;
+}
+
+#endif
+
+
+static int __devinit pmic8058_misc_probe(struct platform_device *pdev)
+{
+ struct pm8058_misc_device *miscdev;
+ struct pm8058_chip *pm_chip;
+ unsigned int irq;
+ int rc;
+
+ pm_chip = dev_get_drvdata(pdev->dev.parent);
+ if (pm_chip == NULL) {
+ pr_err("%s: no driver data passed in.\n", __func__);
+ return -EFAULT;
+ }
+
+ irq = platform_get_irq(pdev, PM8058_MISC_IRQ_OSC_HALT);
+ if (!irq) {
+ pr_err("%s: no IRQ passed in.\n", __func__);
+ return -EFAULT;
+ }
+
+ miscdev = kzalloc(sizeof *miscdev, GFP_KERNEL);
+ if (miscdev == NULL) {
+ pr_err("%s: kzalloc() failed.\n", __func__);
+ return -ENOMEM;
+ }
+
+ miscdev->pm_chip = pm_chip;
+ platform_set_drvdata(pdev, miscdev);
+
+ rc = request_threaded_irq(irq, NULL, pm8058_osc_halt_isr,
+ IRQF_TRIGGER_RISING | IRQF_DISABLED,
+ "pm8058-osc_halt-irq", miscdev);
+ if (rc < 0) {
+ pr_err("%s: request_irq(%d) FAIL: %d\n", __func__, irq, rc);
+ platform_set_drvdata(pdev, miscdev->pm_chip);
+ kfree(miscdev);
+ return rc;
+ }
+ miscdev->osc_halt_irq = irq;
+ miscdev->osc_halt_count = 0;
+
+ rc = pmic8058_misc_dbg_probe(miscdev);
+ if (rc)
+ return rc;
+
+ misc_dev = miscdev;
+
+ pr_notice("%s: OK\n", __func__);
+ return 0;
+}
+
+static int __devexit pmic8058_misc_remove(struct platform_device *pdev)
+{
+ struct pm8058_misc_device *miscdev = platform_get_drvdata(pdev);
+
+ pmic8058_misc_dbg_remove(miscdev);
+
+ platform_set_drvdata(pdev, miscdev->pm_chip);
+ free_irq(miscdev->osc_halt_irq, miscdev);
+ kfree(miscdev);
+
+ return 0;
+}
+
+static struct platform_driver pmic8058_misc_driver = {
+ .probe = pmic8058_misc_probe,
+ .remove = __devexit_p(pmic8058_misc_remove),
+ .driver = {
+ .name = "pm8058-misc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pm8058_misc_init(void)
+{
+ return platform_driver_register(&pmic8058_misc_driver);
+}
+
+static void __exit pm8058_misc_exit(void)
+{
+ platform_driver_unregister(&pmic8058_misc_driver);
+}
+
+module_init(pm8058_misc_init);
+module_exit(pm8058_misc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 Misc Device driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-misc");
diff --git a/drivers/misc/pmic8058-nfc.c b/drivers/misc/pmic8058-nfc.c
new file mode 100644
index 0000000..76a19f4
--- /dev/null
+++ b/drivers/misc/pmic8058-nfc.c
@@ -0,0 +1,322 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 NFC driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-nfc.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+/* PMIC8058 NFC */
+#define SSBI_REG_NFC_CTRL 0x14D
+#define SSBI_REG_NFC_TEST 0x14E
+
+/* NFC_CTRL */
+#define PM8058_NFC_SUPPORT_EN 0x80
+#define PM8058_NFC_LDO_EN 0x40
+#define PM8058_NFC_EN 0x20
+#define PM8058_NFC_EXT_VDDLDO_EN 0x10
+#define PM8058_NFC_VPH_PWR_EN 0x08
+#define PM8058_NFC_RESERVED 0x04
+#define PM8058_NFC_VDDLDO_LEVEL 0x03
+
+/* NFC_TEST */
+#define PM8058_NFC_VDDLDO_MON_EN 0x80
+#define PM8058_NFC_ATEST_EN 0x40
+#define PM8058_NFC_DTEST1_EN 0x20
+#define PM8058_NFC_RESERVED2 0x18
+#define PM8058_NFC_VDDLDO_OK_S 0x04
+#define PM8058_NFC_MBG_EN_S 0x02
+#define PM8058_NFC_EXT_EN_S 0x01
+
+/* Per-device state for the PM8058 NFC support block. */
+struct pm8058_nfc_device {
+	struct mutex nfc_mutex;		/* serializes CTRL/TEST register access */
+	struct pm8058_chip *pm_chip;	/* parent PMIC handle for SSBI I/O */
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry *dent;		/* debugfs control file */
+#endif
+};
+/* Singleton published at probe time and handed out by pm8058_nfc_request(). */
+static struct pm8058_nfc_device *nfc_dev;
+
+/* APIs */
+/*
+ * pm8058_nfc_request - request a handle to access NFC device
+ */
+struct pm8058_nfc_device *pm8058_nfc_request(void)
+{
+	/* NULL until pmic8058_nfc_probe() has run. */
+	return nfc_dev;
+}
+EXPORT_SYMBOL(pm8058_nfc_request);
+
+/*
+ * pm8058_nfc_config - configure NFC signals
+ *
+ * @nfcdev: the NFC device
+ * @mask: signal mask to configure (low byte -> CTRL, next byte -> TEST)
+ * @flags: control flags, same byte layout as @mask
+ *
+ * Read-modify-writes the CTRL and/or TEST registers, touching only the
+ * bits selected in @mask.
+ *
+ * Returns 0 on success or a negative errno from the SSBI access.
+ */
+int pm8058_nfc_config(struct pm8058_nfc_device *nfcdev, u32 mask, u32 flags)
+{
+	u8 nfc_ctrl, nfc_test, m, f;
+	int rc = 0;	/* fix: was uninitialized if no request bit was set */
+
+	if (nfcdev == NULL || IS_ERR(nfcdev) || !mask)
+		return -EINVAL;
+	if (nfcdev->pm_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&nfcdev->nfc_mutex);
+
+	if (!(mask & PM_NFC_CTRL_REQ))
+		goto config_test;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_CTRL, &nfc_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto config_done;
+	}
+
+	/* Update only the masked CTRL bits. */
+	m = mask & 0x00ff;
+	f = flags & 0x00ff;
+	nfc_ctrl &= ~m;
+	nfc_ctrl |= m & f;
+
+	rc = pm8058_write(nfcdev->pm_chip, SSBI_REG_NFC_CTRL, &nfc_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_write(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto config_done;
+	}
+
+config_test:
+	if (!(mask & PM_NFC_TEST_REQ))
+		goto config_done;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_TEST, &nfc_test, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+		goto config_done;
+	}
+
+	/* Update only the masked TEST bits (second byte of mask/flags). */
+	m = (mask >> 8) & 0x00ff;
+	f = (flags >> 8) & 0x00ff;
+	nfc_test &= ~m;
+	nfc_test |= m & f;
+
+	rc = pm8058_write(nfcdev->pm_chip, SSBI_REG_NFC_TEST, &nfc_test, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_write(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+
+config_done:
+	mutex_unlock(&nfcdev->nfc_mutex);
+	return rc;	/* fix: propagate I/O errors; previously always 0 */
+}
+EXPORT_SYMBOL(pm8058_nfc_config);
+
+/*
+ * pm8058_nfc_get_status - read the NFC CTRL and/or TEST registers
+ *
+ * @nfcdev: the NFC device
+ * @mask: which registers to read (PM_NFC_CTRL_REQ / PM_NFC_TEST_* bits)
+ * @status: out: CTRL value in the low byte, TEST value in the next byte;
+ *	bytes that were not requested (or failed to read) come back as 0
+ *
+ * Returns 0 on success or a negative errno from the SSBI access.
+ */
+int pm8058_nfc_get_status(struct pm8058_nfc_device *nfcdev,
+			  u32 mask, u32 *status)
+{
+	u8 nfc_ctrl = 0, nfc_test = 0;	/* fix: were read uninitialized */
+	u32 st;
+	int rc = 0;
+
+	if (nfcdev == NULL || IS_ERR(nfcdev) || status == NULL)
+		return -EINVAL;
+	if (nfcdev->pm_chip == NULL)
+		return -ENODEV;
+
+	st = 0;
+	mutex_lock(&nfcdev->nfc_mutex);
+
+	if (!(mask & PM_NFC_CTRL_REQ))
+		goto read_test;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_CTRL, &nfc_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto get_status_done;
+	}
+
+read_test:
+	if (!(mask & (PM_NFC_TEST_REQ | PM_NFC_TEST_STATUS)))
+		goto get_status_done;
+
+	rc = pm8058_read(nfcdev->pm_chip, SSBI_REG_NFC_TEST, &nfc_test, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_read(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+
+get_status_done:
+	st = nfc_ctrl;
+	st |= nfc_test << 8;
+	*status = st;
+
+	mutex_unlock(&nfcdev->nfc_mutex);
+	return rc;	/* fix: propagate I/O errors; previously always 0 */
+}
+EXPORT_SYMBOL(pm8058_nfc_get_status);
+
+/*
+ * pm8058_nfc_free - release the NFC device
+ *
+ * Writes zeros through the CTRL mask, disabling the NFC signals; the
+ * handle itself is a singleton and is not freed here.
+ */
+void pm8058_nfc_free(struct pm8058_nfc_device *nfcdev)
+{
+	/* Disable all signals */
+	pm8058_nfc_config(nfcdev, PM_NFC_CTRL_REQ, 0);
+}
+EXPORT_SYMBOL(pm8058_nfc_free);
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * debugfs write handler: the high 16 bits of @val select the mask, the
+ * low 16 bits the flag values passed to pm8058_nfc_config().
+ */
+static int pm8058_nfc_debug_set(void *data, u64 val)
+{
+	struct pm8058_nfc_device *nfcdev = data;
+	u32 mask, control;
+	int rc;
+
+	control = (u32)val & 0xffff;
+	mask = ((u32)val >> 16) & 0xffff;
+	rc = pm8058_nfc_config(nfcdev, mask, control);
+	if (rc)
+		pr_err("%s: ERR pm8058_nfc_config: rc=%d, "
+		       "[mask, control]=[0x%x, 0x%x]\n",
+		       __func__, rc, mask, control);
+
+	return rc;	/* fix: surface the failure to debugfs (was always 0) */
+}
+
+/* debugfs read handler: reports CTRL | (TEST << 8) as the file value. */
+static int pm8058_nfc_debug_get(void *data, u64 *val)
+{
+	struct pm8058_nfc_device *nfcdev = data;
+	u32 status = 0;	/* fix: was uninitialized when the read failed */
+	int rc;
+
+	rc = pm8058_nfc_get_status(nfcdev, (u32)-1, &status);
+	if (rc) {
+		pr_err("%s: ERR pm8058_nfc_get_status: rc=%d, status=0x%x\n",
+		       __func__, rc, status);
+		return rc;	/* fix: don't hand stack garbage to debugfs */
+	}
+
+	if (val)
+		*val = (u64)status;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm8058_nfc_fops, pm8058_nfc_debug_get,
+ pm8058_nfc_debug_set, "%llu\n");
+
+/* Create the "pm8058-nfc" debugfs control file. */
+static int pm8058_nfc_debug_init(struct pm8058_nfc_device *nfcdev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_file("pm8058-nfc", 0644, NULL,
+				   (void *)nfcdev, &pm8058_nfc_fops);
+
+	/* fix: print pointers with %p; the old (unsigned) cast with 0x%x
+	 * truncates on 64-bit and warns under -Wpointer-to-int-cast. */
+	if (dent == NULL || IS_ERR(dent))
+		pr_err("%s: ERR debugfs_create_file: dent=%p\n",
+		       __func__, dent);
+
+	nfcdev->dent = dent;
+	return 0;
+}
+#endif
+
+/*
+ * pmic8058_nfc_probe - create the NFC device state
+ *
+ * Caches the parent PM8058 chip handle (drvdata of the MFD parent),
+ * publishes the singleton used by pm8058_nfc_request(), and creates the
+ * debugfs control file when CONFIG_DEBUG_FS is enabled.
+ */
+static int __devinit pmic8058_nfc_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip *pm_chip;
+	struct pm8058_nfc_device *nfcdev;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	nfcdev = kzalloc(sizeof *nfcdev, GFP_KERNEL);
+	if (nfcdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&nfcdev->nfc_mutex);
+
+	nfcdev->pm_chip = pm_chip;
+	nfc_dev = nfcdev;
+	platform_set_drvdata(pdev, nfcdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8058_nfc_debug_init(nfc_dev);
+#endif
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+/*
+ * pmic8058_nfc_remove - tear down state created by pmic8058_nfc_probe
+ */
+static int __devexit pmic8058_nfc_remove(struct platform_device *pdev)
+{
+	struct pm8058_nfc_device *nfcdev = platform_get_drvdata(pdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_remove(nfcdev->dent);
+#endif
+
+	/* fix: clear the singleton so pm8058_nfc_request() can no longer
+	 * hand out a pointer into freed memory after device removal. */
+	nfc_dev = NULL;
+	platform_set_drvdata(pdev, nfcdev->pm_chip);
+	kfree(nfcdev);
+	return 0;
+}
+
+/* Binds to the "pm8058-nfc" child device created by the PM8058 MFD core. */
+static struct platform_driver pmic8058_nfc_driver = {
+	.probe	= pmic8058_nfc_probe,
+	.remove	= __devexit_p(pmic8058_nfc_remove),
+	.driver	= {
+		.name = "pm8058-nfc",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module init: register the NFC platform driver. */
+static int __init pm8058_nfc_init(void)
+{
+	return platform_driver_register(&pmic8058_nfc_driver);
+}
+
+/* Module exit: unregister the NFC platform driver. */
+static void __exit pm8058_nfc_exit(void)
+{
+	platform_driver_unregister(&pmic8058_nfc_driver);
+}
+
+module_init(pm8058_nfc_init);
+module_exit(pm8058_nfc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 NFC driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-nfc");
diff --git a/drivers/misc/pmic8058-pwm.c b/drivers/misc/pmic8058-pwm.c
new file mode 100644
index 0000000..2c04bdc
--- /dev/null
+++ b/drivers/misc/pmic8058-pwm.c
@@ -0,0 +1,926 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 PWM driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-pwm.h>
+#include <linux/slab.h>
+
+#define PM8058_LPG_BANKS 8
+#define PM8058_PWM_CHANNELS PM8058_LPG_BANKS /* MAX=8 */
+
+#define PM8058_LPG_CTL_REGS 7
+
+/* PMIC8058 LPG/PWM */
+#define SSBI_REG_ADDR_LPG_CTL_BASE 0x13C
+#define SSBI_REG_ADDR_LPG_CTL(n) (SSBI_REG_ADDR_LPG_CTL_BASE + (n))
+#define SSBI_REG_ADDR_LPG_BANK_SEL 0x143
+#define SSBI_REG_ADDR_LPG_BANK_EN 0x144
+#define SSBI_REG_ADDR_LPG_LUT_CFG0 0x145
+#define SSBI_REG_ADDR_LPG_LUT_CFG1 0x146
+#define SSBI_REG_ADDR_LPG_TEST 0x147
+
+/* Control 0 */
+#define PM8058_PWM_1KHZ_COUNT_MASK 0xF0
+#define PM8058_PWM_1KHZ_COUNT_SHIFT 4
+
+#define PM8058_PWM_1KHZ_COUNT_MAX 15
+
+#define PM8058_PWM_OUTPUT_EN 0x08
+#define PM8058_PWM_PWM_EN 0x04
+#define PM8058_PWM_RAMP_GEN_EN 0x02
+#define PM8058_PWM_RAMP_START 0x01
+
+#define PM8058_PWM_PWM_START (PM8058_PWM_OUTPUT_EN \
+ | PM8058_PWM_PWM_EN)
+#define PM8058_PWM_RAMP_GEN_START (PM8058_PWM_RAMP_GEN_EN \
+ | PM8058_PWM_RAMP_START)
+
+/* Control 1 */
+#define PM8058_PWM_REVERSE_EN 0x80
+#define PM8058_PWM_BYPASS_LUT 0x40
+#define PM8058_PWM_HIGH_INDEX_MASK 0x3F
+
+/* Control 2 */
+#define PM8058_PWM_LOOP_EN 0x80
+#define PM8058_PWM_RAMP_UP 0x40
+#define PM8058_PWM_LOW_INDEX_MASK 0x3F
+
+/* Control 3 */
+#define PM8058_PWM_VALUE_BIT7_0 0xFF
+#define PM8058_PWM_VALUE_BIT5_0 0x3F
+
+/* Control 4 */
+#define PM8058_PWM_VALUE_BIT8 0x80
+
+#define PM8058_PWM_CLK_SEL_MASK 0x60
+#define PM8058_PWM_CLK_SEL_SHIFT 5
+
+#define PM8058_PWM_CLK_SEL_NO 0
+#define PM8058_PWM_CLK_SEL_1KHZ 1
+#define PM8058_PWM_CLK_SEL_32KHZ 2
+#define PM8058_PWM_CLK_SEL_19P2MHZ 3
+
+#define PM8058_PWM_PREDIVIDE_MASK 0x18
+#define PM8058_PWM_PREDIVIDE_SHIFT 3
+
+#define PM8058_PWM_PREDIVIDE_2 0
+#define PM8058_PWM_PREDIVIDE_3 1
+#define PM8058_PWM_PREDIVIDE_5 2
+#define PM8058_PWM_PREDIVIDE_6 3
+
+#define PM8058_PWM_M_MASK 0x07
+#define PM8058_PWM_M_MIN 0
+#define PM8058_PWM_M_MAX 7
+
+/* Control 5 */
+#define PM8058_PWM_PAUSE_COUNT_HI_MASK 0xFC
+#define PM8058_PWM_PAUSE_COUNT_HI_SHIFT 2
+
+#define PM8058_PWM_PAUSE_ENABLE_HIGH 0x02
+#define PM8058_PWM_SIZE_9_BIT 0x01
+
+/* Control 6 */
+#define PM8058_PWM_PAUSE_COUNT_LO_MASK 0xFC
+#define PM8058_PWM_PAUSE_COUNT_LO_SHIFT 2
+
+#define PM8058_PWM_PAUSE_ENABLE_LOW 0x02
+#define PM8058_PWM_RESERVED 0x01
+
+#define PM8058_PWM_PAUSE_COUNT_MAX 56 /* < 2^6 = 64*/
+
+/* LUT_CFG1 */
+#define PM8058_PWM_LUT_READ 0x40
+
+/* TEST */
+#define PM8058_PWM_DTEST_MASK 0x38
+#define PM8058_PWM_DTEST_SHIFT 3
+
+#define PM8058_PWM_DTEST_BANK_MASK 0x07
+
+/* PWM frequency support
+ *
+ * PWM Frequency = Clock Frequency / (N * T)
+ * or
+ * PWM Period = Clock Period * (N * T)
+ * where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, m = 0..7 (exponent)
+ *
+ * We use this formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) / 2^m = (Pre-divide * Clock Period)
+*/
+#define NUM_CLOCKS 3
+
+#define NSEC_1000HZ (NSEC_PER_SEC / 1000)
+#define NSEC_32768HZ (NSEC_PER_SEC / 32768)
+#define NSEC_19P2MHZ (NSEC_PER_SEC / 19200000)
+
+#define CLK_PERIOD_MIN NSEC_19P2MHZ
+#define CLK_PERIOD_MAX NSEC_1000HZ
+
+#define NUM_PRE_DIVIDE 3 /* No default support for pre-divide = 6 */
+
+#define PRE_DIVIDE_0 2
+#define PRE_DIVIDE_1 3
+#define PRE_DIVIDE_2 5
+
+#define PRE_DIVIDE_MIN PRE_DIVIDE_0
+#define PRE_DIVIDE_MAX PRE_DIVIDE_2
+
+static char *clks[NUM_CLOCKS] = {
+ "1K", "32768", "19.2M"
+};
+
+static unsigned pre_div[NUM_PRE_DIVIDE] = {
+ PRE_DIVIDE_0, PRE_DIVIDE_1, PRE_DIVIDE_2
+};
+
+static unsigned int pt_t[NUM_PRE_DIVIDE][NUM_CLOCKS] = {
+ { PRE_DIVIDE_0 * NSEC_1000HZ,
+ PRE_DIVIDE_0 * NSEC_32768HZ,
+ PRE_DIVIDE_0 * NSEC_19P2MHZ,
+ },
+ { PRE_DIVIDE_1 * NSEC_1000HZ,
+ PRE_DIVIDE_1 * NSEC_32768HZ,
+ PRE_DIVIDE_1 * NSEC_19P2MHZ,
+ },
+ { PRE_DIVIDE_2 * NSEC_1000HZ,
+ PRE_DIVIDE_2 * NSEC_32768HZ,
+ PRE_DIVIDE_2 * NSEC_19P2MHZ,
+ },
+};
+
+#define MIN_MPT ((PRE_DIVIDE_MIN * CLK_PERIOD_MIN) << PM8058_PWM_M_MIN)
+#define MAX_MPT ((PRE_DIVIDE_MAX * CLK_PERIOD_MAX) << PM8058_PWM_M_MAX)
+
+/* Private data */
+struct pm8058_pwm_chip;
+
+/* Per-channel state; pwm_ctl[] caches the 7 LPG CTL register values. */
+struct pwm_device {
+	int pwm_id;		/* = bank/channel id */
+	int in_use;		/* claimed by pwm_request() */
+	const char *label;	/* owner tag passed at request time */
+	int pwm_period;
+	int pwm_duty;
+	u8 pwm_ctl[PM8058_LPG_CTL_REGS];
+	int irq;
+	struct pm8058_pwm_chip *chip;	/* back-pointer to chip state */
+};
+
+/* Chip-wide state shared by all channels. */
+struct pm8058_pwm_chip {
+	struct pwm_device pwm_dev[PM8058_PWM_CHANNELS];
+	u8 bank_mask;		/* cached LPG_BANK_EN register image */
+	struct mutex pwm_mutex;	/* serializes bank select + CTL access */
+	struct pm8058_chip *pm_chip;
+	struct pm8058_pwm_pdata *pdata;
+};
+
+/* Singleton set at probe time; NULL until the platform device binds. */
+static struct pm8058_pwm_chip *pwm_chip;
+
+struct pw8058_pwm_config {
+ int pwm_size; /* round up to 6 or 9 for 6/9-bit PWM SIZE */
+ int clk;
+ int pre_div;
+ int pre_div_exp;
+ int pwm_value;
+ int bypass_lut;
+
+ /* LUT parameters when bypass_lut is 0 */
+ int lut_duty_ms;
+ int lut_lo_index;
+ int lut_hi_index;
+ int lut_pause_hi;
+ int lut_pause_lo;
+ int flags;
+};
+
+static u16 duty_msec[PM8058_PWM_1KHZ_COUNT_MAX + 1] = {
+ 0, 1, 2, 3, 4, 6, 8, 16, 18, 24, 32, 36, 64, 128, 256, 512
+};
+
+static u16 pause_count[PM8058_PWM_PAUSE_COUNT_MAX + 1] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 23, 28, 31, 42, 47, 56, 63, 83, 94, 111, 125, 167, 188, 222, 250, 333,
+ 375, 500, 667, 750, 800, 900, 1000, 1100,
+ 1200, 1300, 1400, 1500, 1600, 1800, 2000, 2500,
+ 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500,
+ 7000
+};
+
+/* Internal functions */
+/*
+ * pm8058_pwm_bank_enable - set/clear this channel's bit in LPG_BANK_EN
+ *
+ * The cached bank_mask is updated only after the hardware write
+ * succeeds, so cache and register stay in sync on failure.
+ */
+static int pm8058_pwm_bank_enable(struct pwm_device *pwm, int enable)
+{
+	int rc;
+	u8 reg;
+	struct pm8058_pwm_chip *chip;
+
+	chip = pwm->chip;
+
+	if (enable)
+		reg = chip->bank_mask | (1 << pwm->pwm_id);
+	else
+		reg = chip->bank_mask & ~(1 << pwm->pwm_id);
+
+	rc = pm8058_write(chip->pm_chip, SSBI_REG_ADDR_LPG_BANK_EN, &reg, 1);
+	if (rc) {
+		pr_err("%s: pm8058_write(): rc=%d (Enable LPG Bank)\n",
+		       __func__, rc);
+		goto bail_out;
+	}
+	chip->bank_mask = reg;
+
+bail_out:
+	return rc;
+}
+
+/* Select which LPG bank the shared CTL register window addresses. */
+static int pm8058_pwm_bank_sel(struct pwm_device *pwm)
+{
+	int rc;
+	u8 reg;
+
+	reg = pwm->pwm_id;
+	rc = pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_BANK_SEL,
+			  &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write(): rc=%d (Select PWM Bank)\n",
+		       __func__, rc);
+	return rc;
+}
+
+/*
+ * pm8058_pwm_start - start/stop the PWM output (and optionally the LUT
+ * ramp generator) by rewriting CTL0; the cached pwm_ctl[0] is updated
+ * only when the write succeeds.  Caller must have selected the bank.
+ */
+static int pm8058_pwm_start(struct pwm_device *pwm, int start, int ramp_start)
+{
+	int rc;
+	u8 reg;
+
+	if (start) {
+		reg = pwm->pwm_ctl[0] | PM8058_PWM_PWM_START;
+		if (ramp_start)
+			reg |= PM8058_PWM_RAMP_GEN_START;
+		else
+			reg &= ~PM8058_PWM_RAMP_GEN_START;
+	} else {
+		reg = pwm->pwm_ctl[0] & ~PM8058_PWM_PWM_START;
+		reg &= ~PM8058_PWM_RAMP_GEN_START;
+	}
+
+	rc = pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_CTL(0),
+			  &reg, 1);
+	if (rc)
+		pr_err("%s: pm8058_write(): rc=%d (Enable PWM Ctl 0)\n",
+		       __func__, rc);
+	else
+		pwm->pwm_ctl[0] = reg;
+	return rc;
+}
+
+/*
+ * pm8058_pwm_calc_period - pick clock, pre-divider and exponent for a period
+ *
+ * Fills in pwm_size (6 or 9 bits), clk, pre_div and pre_div_exp of
+ * @pwm_conf so that Clock-Period * Pre-divide * 2^m * 2^pwm_size best
+ * approximates @period_us (see the formula comment above pt_t[]).
+ */
+static void pm8058_pwm_calc_period(unsigned int period_us,
+				   struct pw8058_pwm_config *pwm_conf)
+{
+	int n, m, clk, div;
+	int best_m, best_div, best_clk;
+	int last_err, cur_err, better_err, better_m;
+	unsigned int tmp_p, last_p, min_err, period_n;
+
+	/* PWM Period / N : handle underflow or overflow */
+	if (period_us < (PM_PWM_PERIOD_MAX / NSEC_PER_USEC))
+		period_n = (period_us * NSEC_PER_USEC) >> 6;
+	else
+		period_n = (period_us >> 6) * NSEC_PER_USEC;
+	if (period_n >= MAX_MPT) {
+		/* Too long for 6-bit resolution; fall back to 9-bit size. */
+		n = 9;
+		period_n >>= 3;
+	} else
+		n = 6;
+
+	/* Exhaustive search over clock/pre-divider pairs: halve the target
+	 * until it fits below the pair's base period, keeping the (clk,
+	 * div, m) combination with the smallest absolute error. */
+	min_err = MAX_MPT;
+	best_m = 0;
+	best_clk = 0;
+	best_div = 0;
+	for (clk = 0; clk < NUM_CLOCKS; clk++) {
+		for (div = 0; div < NUM_PRE_DIVIDE; div++) {
+			tmp_p = period_n;
+			last_p = tmp_p;
+			for (m = 0; m <= PM8058_PWM_M_MAX; m++) {
+				if (tmp_p <= pt_t[div][clk]) {
+					/* Found local best */
+					if (!m) {
+						better_err = pt_t[div][clk] -
+							tmp_p;
+						better_m = m;
+					} else {
+						/* Compare error at m with
+						 * error at m - 1. */
+						last_err = last_p -
+							pt_t[div][clk];
+						cur_err = pt_t[div][clk] -
+							tmp_p;
+
+						if (cur_err < last_err) {
+							better_err = cur_err;
+							better_m = m;
+						} else {
+							better_err = last_err;
+							better_m = m - 1;
+						}
+					}
+
+					if (better_err < min_err) {
+						min_err = better_err;
+						best_m = better_m;
+						best_clk = clk;
+						best_div = div;
+					}
+					break;
+				} else {
+					last_p = tmp_p;
+					tmp_p >>= 1;
+				}
+			}
+		}
+	}
+
+	pwm_conf->pwm_size = n;
+	pwm_conf->clk = best_clk;
+	pwm_conf->pre_div = best_div;
+	pwm_conf->pre_div_exp = best_m;
+
+	pr_debug("%s: period=%u: n=%d, m=%d, clk[%d]=%s, div[%d]=%d\n",
+		 __func__, (unsigned)period_us, n, best_m,
+		 best_clk, clks[best_clk], best_div, pre_div[best_div]);
+}
+
+/*
+ * pm8058_pwm_configure - push a prepared configuration into the LPG CTL regs
+ *
+ * Builds the cached pwm_ctl[] image from @pwm_conf — either a direct PWM
+ * value with the LUT bypassed (writes CTL0..5), or LUT ramp parameters
+ * (writes CTL0..6) — then writes it to the selected bank.  Returns the
+ * last pm8058_write() result from the CTL loop.
+ */
+static int pm8058_pwm_configure(struct pwm_device *pwm,
+				struct pw8058_pwm_config *pwm_conf)
+{
+	int i, rc, len;
+	u8 reg, ramp_enabled = 0;
+
+	/* CTL5: PWM size select (overwritten below in the LUT path). */
+	reg = (pwm_conf->pwm_size > 6) ? PM8058_PWM_SIZE_9_BIT : 0;
+	pwm->pwm_ctl[5] = reg;
+
+	/* CTL4: clock select (hardware encoding is clk index + 1),
+	 * pre-divider and exponent m. */
+	reg = ((pwm_conf->clk + 1) << PM8058_PWM_CLK_SEL_SHIFT)
+		& PM8058_PWM_CLK_SEL_MASK;
+	reg |= (pwm_conf->pre_div << PM8058_PWM_PREDIVIDE_SHIFT)
+		& PM8058_PWM_PREDIVIDE_MASK;
+	reg |= pwm_conf->pre_div_exp & PM8058_PWM_M_MASK;
+	pwm->pwm_ctl[4] = reg;
+
+	if (pwm_conf->bypass_lut) {
+		pwm->pwm_ctl[0] &= PM8058_PWM_PWM_START; /* keep enabled */
+		pwm->pwm_ctl[1] = PM8058_PWM_BYPASS_LUT;
+		pwm->pwm_ctl[2] = 0;
+
+		if (pwm_conf->pwm_size > 6) {
+			/* 9-bit value: bits 7..0 in CTL3, bit 8 in CTL4. */
+			pwm->pwm_ctl[3] = pwm_conf->pwm_value
+						& PM8058_PWM_VALUE_BIT7_0;
+			pwm->pwm_ctl[4] |= (pwm_conf->pwm_value >> 1)
+						& PM8058_PWM_VALUE_BIT8;
+		} else {
+			pwm->pwm_ctl[3] = pwm_conf->pwm_value
+						& PM8058_PWM_VALUE_BIT5_0;
+		}
+
+		len = 6;
+	} else {
+		int pause_cnt, j;
+
+		/* Linear search for duty time */
+		for (i = 0; i < PM8058_PWM_1KHZ_COUNT_MAX; i++) {
+			if (duty_msec[i] >= pwm_conf->lut_duty_ms)
+				break;
+		}
+
+		/* Remember whether the ramp was running so it can be
+		 * re-asserted after the CTL registers are rewritten. */
+		ramp_enabled = pwm->pwm_ctl[0] & PM8058_PWM_RAMP_GEN_START;
+		pwm->pwm_ctl[0] &= PM8058_PWM_PWM_START; /* keep enabled */
+		pwm->pwm_ctl[0] |= (i << PM8058_PWM_1KHZ_COUNT_SHIFT) &
+					PM8058_PWM_1KHZ_COUNT_MASK;
+		pwm->pwm_ctl[1] = pwm_conf->lut_hi_index &
+					PM8058_PWM_HIGH_INDEX_MASK;
+		pwm->pwm_ctl[2] = pwm_conf->lut_lo_index &
+					PM8058_PWM_LOW_INDEX_MASK;
+
+		if (pwm_conf->flags & PM_PWM_LUT_REVERSE)
+			pwm->pwm_ctl[1] |= PM8058_PWM_REVERSE_EN;
+		if (pwm_conf->flags & PM_PWM_LUT_RAMP_UP)
+			pwm->pwm_ctl[2] |= PM8058_PWM_RAMP_UP;
+		if (pwm_conf->flags & PM_PWM_LUT_LOOP)
+			pwm->pwm_ctl[2] |= PM8058_PWM_LOOP_EN;
+
+		/* Pause time */
+		if (pwm_conf->flags & PM_PWM_LUT_PAUSE_HI_EN) {
+			/* Linear search for pause time (rounded to the
+			 * nearest multiple of the duty step). */
+			pause_cnt = (pwm_conf->lut_pause_hi + duty_msec[i] / 2)
+					/ duty_msec[i];
+			for (j = 0; j < PM8058_PWM_PAUSE_COUNT_MAX; j++) {
+				if (pause_count[j] >= pause_cnt)
+					break;
+			}
+			pwm->pwm_ctl[5] = (j <<
+					   PM8058_PWM_PAUSE_COUNT_HI_SHIFT) &
+						PM8058_PWM_PAUSE_COUNT_HI_MASK;
+			pwm->pwm_ctl[5] |= PM8058_PWM_PAUSE_ENABLE_HIGH;
+		} else
+			pwm->pwm_ctl[5] = 0;
+
+		if (pwm_conf->flags & PM_PWM_LUT_PAUSE_LO_EN) {
+			/* Linear search for pause time */
+			pause_cnt = (pwm_conf->lut_pause_lo + duty_msec[i] / 2)
+					/ duty_msec[i];
+			for (j = 0; j < PM8058_PWM_PAUSE_COUNT_MAX; j++) {
+				if (pause_count[j] >= pause_cnt)
+					break;
+			}
+			pwm->pwm_ctl[6] = (j <<
+					   PM8058_PWM_PAUSE_COUNT_LO_SHIFT) &
+						PM8058_PWM_PAUSE_COUNT_LO_MASK;
+			pwm->pwm_ctl[6] |= PM8058_PWM_PAUSE_ENABLE_LOW;
+		} else
+			pwm->pwm_ctl[6] = 0;
+
+		len = 7;
+	}
+
+	/* NOTE(review): the bank_sel return value is ignored here — a
+	 * failed select would write the CTL image to the wrong bank. */
+	pm8058_pwm_bank_sel(pwm);
+
+	for (i = 0; i < len; i++) {
+		rc = pm8058_write(pwm->chip->pm_chip,
+				  SSBI_REG_ADDR_LPG_CTL(i),
+				  &pwm->pwm_ctl[i], 1);
+		if (rc) {
+			pr_err("%s: pm8058_write(): rc=%d (PWM Ctl[%d])\n",
+			       __func__, rc, i);
+			break;
+		}
+	}
+
+	/* Re-assert the ramp bits if the ramp was running before. */
+	if (ramp_enabled) {
+		pwm->pwm_ctl[0] |= ramp_enabled;
+		pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_CTL(0),
+			     &pwm->pwm_ctl[0], 1);
+	}
+
+	return rc;
+}
+
+/* APIs */
+/*
+ * pwm_request - request a PWM device
+ *
+ * @pwm_id: bank/channel id, 0 .. PM8058_PWM_CHANNELS - 1
+ * @label: owner tag recorded on the channel
+ *
+ * Returns the channel handle, or ERR_PTR(-EINVAL/-ENODEV/-EBUSY).
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+	struct pwm_device *pwm;
+
+	/* fix: '>' allowed pwm_id == PM8058_PWM_CHANNELS, which indexes
+	 * one past the end of pwm_dev[]; valid ids are 0..CHANNELS-1. */
+	if (pwm_id >= PM8058_PWM_CHANNELS || pwm_id < 0)
+		return ERR_PTR(-EINVAL);
+	if (pwm_chip == NULL)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&pwm_chip->pwm_mutex);
+	pwm = &pwm_chip->pwm_dev[pwm_id];
+	if (!pwm->in_use) {
+		pwm->in_use = 1;
+		pwm->label = label;
+
+		if (pwm_chip->pdata && pwm_chip->pdata->config)
+			pwm_chip->pdata->config(pwm, pwm_id, 1);
+	} else
+		pwm = ERR_PTR(-EBUSY);
+	mutex_unlock(&pwm_chip->pwm_mutex);
+
+	return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+/*
+ * pwm_free - free a PWM device
+ *
+ * Stops the output, runs the board-specific de-config hook, marks the
+ * channel unused and disables its bank.  Safe to call with NULL or an
+ * ERR_PTR handle (no-op).
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
+		return;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		if (pwm->chip->pdata && pwm->chip->pdata->config)
+			pwm->chip->pdata->config(pwm, pwm->pwm_id, 0);
+
+		pwm->in_use = 0;
+		pwm->label = NULL;
+	}
+	/* Bank is disabled even for an already-free channel. */
+	pm8058_pwm_bank_enable(pwm, 0);
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL(pwm_free);
+
+/*
+ * pwm_config - change a PWM device configuration
+ *
+ * @pwm: the PWM device
+ * @duty_us: duty cycle in microseconds (must not exceed @period_us)
+ * @period_us: period in microseconds, within [PM_PWM_PERIOD_MIN,
+ *	PM_PWM_PERIOD_MAX]
+ *
+ * Computes the clock/divider settings for @period_us, derives the raw
+ * PWM compare value from the duty ratio, and writes the configuration
+ * with the LUT bypassed.  Returns 0 or a negative errno.
+ */
+int pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	struct pw8058_pwm_config pwm_conf;
+	unsigned int max_pwm_value, tmp;
+	int rc;
+
+	if (pwm == NULL || IS_ERR(pwm) ||
+	    (unsigned)duty_us > (unsigned)period_us ||
+	    (unsigned)period_us > PM_PWM_PERIOD_MAX ||
+	    (unsigned)period_us < PM_PWM_PERIOD_MIN)
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pm8058_pwm_calc_period(period_us, &pwm_conf);
+
+	/* Figure out pwm_value with overflow handling */
+	if ((unsigned)period_us > (1 << pwm_conf.pwm_size)) {
+		tmp = period_us;
+		tmp >>= pwm_conf.pwm_size;
+		pwm_conf.pwm_value = (unsigned)duty_us / tmp;
+	} else {
+		tmp = duty_us;
+		tmp <<= pwm_conf.pwm_size;
+		pwm_conf.pwm_value = tmp / (unsigned)period_us;
+	}
+	/* Clamp to the register's maximum for the chosen PWM size. */
+	max_pwm_value = (1 << pwm_conf.pwm_size) - 1;
+	if (pwm_conf.pwm_value > max_pwm_value)
+		pwm_conf.pwm_value = max_pwm_value;
+
+	pwm_conf.bypass_lut = 1;
+
+	pr_debug("%s: duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
+		 __func__, (unsigned)duty_us, (unsigned)period_us,
+		 pwm_conf.pwm_value, 1 << pwm_conf.pwm_size);
+
+	rc = pm8058_pwm_configure(pwm, &pwm_conf);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config);
+
+/*
+ * pwm_enable - start a PWM output toggling
+ *
+ * Runs the board-specific enable hook, enables the channel's bank and
+ * starts the output (without the ramp generator).  Returns 0 or a
+ * negative errno; note only the bank-enable result is propagated.
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	int rc;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (!pwm->in_use)
+		rc = -EINVAL;
+	else {
+		if (pwm->chip->pdata && pwm->chip->pdata->enable)
+			pwm->chip->pdata->enable(pwm, pwm->pwm_id, 1);
+
+		rc = pm8058_pwm_bank_enable(pwm, 1);
+
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 1, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ *
+ * Stops the output, disables the channel's bank and runs the
+ * board-specific disable hook.  Safe to call with NULL or an ERR_PTR
+ * handle (no-op).
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
+		return;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		pm8058_pwm_bank_enable(pwm, 0);
+
+		if (pwm->chip->pdata && pwm->chip->pdata->enable)
+			pwm->chip->pdata->enable(pwm, pwm->pwm_id, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL(pwm_disable);
+
+/*
+ * pm8058_pwm_lut_config - change a PWM device configuration to use LUT
+ *
+ * @pwm: the PWM device
+ * @period_us: period in micro second
+ * @duty_pct: array of duty cycles in percent, like 20, 50.
+ * @duty_time_ms: time for each duty cycle in millisecond
+ * @start_idx: start index in lookup table from 0 to MAX-1
+ * @idx_len: number of index
+ * @pause_lo: pause time in millisecond at low index
+ * @pause_hi: pause time in millisecond at high index
+ * @flags: control flags (PM_PWM_LUT_* bits; PM_PWM_LUT_NO_TABLE skips
+ *	rewriting the lookup table entries)
+ *
+ * Returns 0 or a negative errno.
+ */
+int pm8058_pwm_lut_config(struct pwm_device *pwm, int period_us,
+			  int duty_pct[], int duty_time_ms, int start_idx,
+			  int idx_len, int pause_lo, int pause_hi, int flags)
+{
+	struct pw8058_pwm_config pwm_conf;
+	unsigned int pwm_value, max_pwm_value;
+	u8 cfg0, cfg1;
+	int i, len;
+	int rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || !idx_len)
+		return -EINVAL;
+	if (duty_pct == NULL && !(flags & PM_PWM_LUT_NO_TABLE))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+	/* A full-table configuration must start at index 0. */
+	if (idx_len >= PM_PWM_LUT_SIZE && start_idx)
+		return -EINVAL;
+	if ((start_idx + idx_len) > PM_PWM_LUT_SIZE)
+		return -EINVAL;
+	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+	    (unsigned)period_us < PM_PWM_PERIOD_MIN)
+		return -EINVAL;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pm8058_pwm_calc_period(period_us, &pwm_conf);
+
+	len = (idx_len > PM_PWM_LUT_SIZE) ? PM_PWM_LUT_SIZE : idx_len;
+
+	if (flags & PM_PWM_LUT_NO_TABLE)
+		goto after_table_write;
+
+	/* Convert each percentage to a raw PWM value and write it to the
+	 * LUT via the CFG0/CFG1 register pair (CFG1 carries value bit 8
+	 * and the table index). */
+	max_pwm_value = (1 << pwm_conf.pwm_size) - 1;
+	for (i = 0; i < len; i++) {
+		pwm_value = (duty_pct[i] << pwm_conf.pwm_size) / 100;
+		/* Avoid overflow */
+		if (pwm_value > max_pwm_value)
+			pwm_value = max_pwm_value;
+		cfg0 = pwm_value & 0xff;
+		cfg1 = (pwm_value >> 1) & 0x80;
+		cfg1 |= start_idx + i;
+
+		pr_debug("%s: %d: pwm=%d\n", __func__, i, pwm_value);
+
+		pm8058_write(pwm->chip->pm_chip,
+			     SSBI_REG_ADDR_LPG_LUT_CFG0,
+			     &cfg0, 1);
+		pm8058_write(pwm->chip->pm_chip,
+			     SSBI_REG_ADDR_LPG_LUT_CFG1,
+			     &cfg1, 1);
+	}
+
+after_table_write:
+	pwm_conf.lut_duty_ms = duty_time_ms;
+	pwm_conf.lut_lo_index = start_idx;
+	pwm_conf.lut_hi_index = start_idx + len - 1;
+	pwm_conf.lut_pause_lo = pause_lo;
+	pwm_conf.lut_pause_hi = pause_hi;
+	pwm_conf.flags = flags;
+	pwm_conf.bypass_lut = 0;
+
+	rc = pm8058_pwm_configure(pwm, &pwm_conf);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_lut_config);
+
+/*
+ * pm8058_pwm_lut_enable - control a PWM device to start/stop LUT ramp
+ *
+ * @pwm: the PWM device
+ * @start: to start (1), or stop (0)
+ *
+ * Unlike pwm_enable(), this starts the output together with the ramp
+ * generator.  Always returns 0 (hardware write errors are not
+ * propagated here).
+ */
+int pm8058_pwm_lut_enable(struct pwm_device *pwm, int start)
+{
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (start) {
+		pm8058_pwm_bank_enable(pwm, 1);
+
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 1, 1);
+	} else {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		pm8058_pwm_bank_enable(pwm, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_pwm_lut_enable);
+
+#define SSBI_REG_ADDR_LED_BASE 0x131
+#define SSBI_REG_ADDR_LED(n) (SSBI_REG_ADDR_LED_BASE + (n))
+#define SSBI_REG_ADDR_FLASH_BASE 0x48
+#define SSBI_REG_ADDR_FLASH_DRV_1 0xFB
+#define SSBI_REG_ADDR_FLASH(n) (((n) < 2 ? \
+ SSBI_REG_ADDR_FLASH_BASE + (n) : \
+ SSBI_REG_ADDR_FLASH_DRV_1))
+
+#define PM8058_LED_CURRENT_SHIFT 3
+#define PM8058_LED_MODE_MASK 0x07
+
+#define PM8058_FLASH_CURRENT_SHIFT 4
+#define PM8058_FLASH_MODE_MASK 0x03
+#define PM8058_FLASH_MODE_NONE 0
+#define PM8058_FLASH_MODE_DTEST1 1
+#define PM8058_FLASH_MODE_DTEST2 2
+#define PM8058_FLASH_MODE_PWM 3
+
+/*
+ * pm8058_pwm_config_led - route a PWM channel to an LED/flash driver
+ *
+ * @pwm: the PWM device (provides the chip handle for the SSBI write)
+ * @id: PM_PWM_LED_* sink identifier
+ * @mode: PM_PWM_CONF_* source select (or LED mode bits for LED 0-2)
+ * @max_current: current limit; divided by 2 mA-steps for LEDs, 20 for flash
+ *
+ * NOTE(review): unlike the other exported APIs, @pwm is not
+ * NULL/IS_ERR-checked here — callers must pass a valid handle.
+ *
+ * Returns 0 or a negative errno.
+ */
+int pm8058_pwm_config_led(struct pwm_device *pwm, int id,
+			  int mode, int max_current)
+{
+	int rc;
+	u8 conf;
+
+	switch (id) {
+	case PM_PWM_LED_0:
+	case PM_PWM_LED_1:
+	case PM_PWM_LED_2:
+		conf = mode & PM8058_LED_MODE_MASK;
+		conf |= (max_current / 2) << PM8058_LED_CURRENT_SHIFT;
+		rc = pm8058_write(pwm->chip->pm_chip,
+				  SSBI_REG_ADDR_LED(id), &conf, 1);
+		break;
+
+	case PM_PWM_LED_KPD:
+	case PM_PWM_LED_FLASH:
+	case PM_PWM_LED_FLASH1:
+		switch (mode) {
+		case PM_PWM_CONF_PWM1:
+		case PM_PWM_CONF_PWM2:
+		case PM_PWM_CONF_PWM3:
+			conf = PM8058_FLASH_MODE_PWM;
+			break;
+		case PM_PWM_CONF_DTEST1:
+			conf = PM8058_FLASH_MODE_DTEST1;
+			break;
+		case PM_PWM_CONF_DTEST2:
+			conf = PM8058_FLASH_MODE_DTEST2;
+			break;
+		default:
+			conf = PM8058_FLASH_MODE_NONE;
+			break;
+		}
+		conf |= (max_current / 20) << PM8058_FLASH_CURRENT_SHIFT;
+		/* Re-base id so SSBI_REG_ADDR_FLASH() sees 0..2. */
+		id -= PM_PWM_LED_KPD;
+		rc = pm8058_write(pwm->chip->pm_chip,
+				  SSBI_REG_ADDR_FLASH(id), &conf, 1);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_config_led);
+
+/*
+ * pm8058_pwm_set_dtest - route this channel's output to the DTEST1 pin
+ *
+ * @pwm: the PWM device
+ * @enable: non-zero routes the bank to DTEST1, zero clears the routing
+ *
+ * NOTE(review): in_use is checked without taking pwm_mutex here,
+ * unlike the other APIs — confirm whether that is intentional.
+ *
+ * Returns 0 or a negative errno.
+ */
+int pm8058_pwm_set_dtest(struct pwm_device *pwm, int enable)
+{
+	int rc;
+	u8 reg;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	if (!pwm->in_use)
+		rc = -EINVAL;
+	else {
+		reg = pwm->pwm_id & PM8058_PWM_DTEST_BANK_MASK;
+		if (enable)
+			/* Only Test 1 available */
+			reg |= (1 << PM8058_PWM_DTEST_SHIFT) &
+				PM8058_PWM_DTEST_MASK;
+		rc = pm8058_write(pwm->chip->pm_chip, SSBI_REG_ADDR_LPG_TEST,
+				  &reg, 1);
+		if (rc)
+			pr_err("%s: pm8058_write(DTEST=0x%x): rc=%d\n",
+			       __func__, reg, rc);
+
+	}
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_set_dtest);
+
+/*
+ * pmic8058_pwm_probe - allocate and publish the chip-wide PWM state
+ *
+ * Caches the parent PM8058 chip handle, initializes the per-channel
+ * back-pointers and ids, and publishes the module-global pwm_chip used
+ * by the pwm_* API.
+ */
+static int __devinit pmic8058_pwm_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip *pm_chip;
+	struct pm8058_pwm_chip *chip;
+	int i;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	chip = kzalloc(sizeof *chip, GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < PM8058_PWM_CHANNELS; i++) {
+		chip->pwm_dev[i].pwm_id = i;
+		chip->pwm_dev[i].chip = chip;
+	}
+
+	mutex_init(&chip->pwm_mutex);
+
+	chip->pdata = pdev->dev.platform_data;
+	chip->pm_chip = pm_chip;
+	pwm_chip = chip;
+	platform_set_drvdata(pdev, chip);
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+/*
+ * pmic8058_pwm_remove - tear down state created by pmic8058_pwm_probe
+ */
+static int __devexit pmic8058_pwm_remove(struct platform_device *pdev)
+{
+	struct pm8058_pwm_chip *chip = platform_get_drvdata(pdev);
+
+	/* fix: clear the module-global so pwm_request() and friends stop
+	 * handing out channels that point into freed memory. */
+	pwm_chip = NULL;
+	platform_set_drvdata(pdev, NULL);
+	kfree(chip);
+	return 0;
+}
+
+/* Binds to the "pm8058-pwm" child device created by the PM8058 MFD core. */
+static struct platform_driver pmic8058_pwm_driver = {
+	.probe	= pmic8058_pwm_probe,
+	.remove	= __devexit_p(pmic8058_pwm_remove),
+	.driver	= {
+		.name = "pm8058-pwm",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Registered via subsys_initcall (not module_init) so the pwm_* API is
+ * available before client drivers (backlight, vibrator, LEDs) probe. */
+static int __init pm8058_pwm_init(void)
+{
+	return platform_driver_register(&pmic8058_pwm_driver);
+}
+
+/* Module exit: unregister the PWM platform driver. */
+static void __exit pm8058_pwm_exit(void)
+{
+	platform_driver_unregister(&pmic8058_pwm_driver);
+}
+
+subsys_initcall(pm8058_pwm_init);
+module_exit(pm8058_pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 PWM driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058_pwm");
diff --git a/drivers/misc/pmic8058-upl.c b/drivers/misc/pmic8058-upl.c
new file mode 100644
index 0000000..ae0abd8
--- /dev/null
+++ b/drivers/misc/pmic8058-upl.c
@@ -0,0 +1,363 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 UPL driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pmic8058-upl.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+/* PMIC8058 UPL registers */
+#define SSBI_REG_UPL_CTRL 0x17B
+#define SSBI_REG_UPL_TRUTHTABLE1 0x17C
+#define SSBI_REG_UPL_TRUTHTABLE2 0x17D
+
+/* Driver state for the single UPL block on a PM8058. */
+struct pm8058_upl_device {
+	struct mutex		upl_mutex;	/* serializes SSBI register access */
+	struct pm8058_chip	*pm_chip;	/* parent PMIC core handle */
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry		*dent;		/* debugfs directory */
+#endif
+};
+/* Singleton, published by probe and handed out by pm8058_upl_request(). */
+static struct pm8058_upl_device *upl_dev;
+
+/* APIs */
+
+/*
+ * pm8058_upl_request - request a handle to access UPL device
+ *
+ * Returns the singleton UPL device set up at probe time, or NULL when
+ * the driver has not been probed yet.
+ */
+struct pm8058_upl_device *pm8058_upl_request(void)
+{
+	return upl_dev;
+}
+EXPORT_SYMBOL(pm8058_upl_request);
+
+/*
+ * pm8058_upl_read_truthtable - read value currently stored in UPL truth table
+ *
+ * @upldev: UPL handle from pm8058_upl_request()
+ * @truthtable: out-parameter for the 16-bit truth table; written only
+ *              when the function returns 0
+ *
+ * Returns 0 on success, -EINVAL for a bad handle, -ENODEV when no PMIC
+ * core is bound, or the SSBI read error code.
+ */
+int pm8058_upl_read_truthtable(struct pm8058_upl_device *upldev,
+				u16 *truthtable)
+{
+	int rc;
+	u8 table[2] = {0, 0};	/* init: keep error logs deterministic */
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+	if (upldev->pm_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_TRUTHTABLE1,
+			&(table[0]), 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc);
+		goto upl_read_done;
+	}
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_TRUTHTABLE2,
+			&(table[1]), 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_read(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE2, table[1], rc);
+upl_read_done:
+	mutex_unlock(&upldev->upl_mutex);
+	/* Only report a value actually read from hardware; previously a
+	 * failed read composed *truthtable from uninitialized stack. */
+	if (!rc)
+		*truthtable = (((u16)table[1]) << 8) | table[0];
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_upl_read_truthtable);
+
+/*
+ * pm8058_upl_writes_truthtable - write value into UPL truth table
+ *
+ * @upldev: the UPL device
+ * @truthtable: value written to UPL truth table
+ *
+ * Each bit in parameter "truthtable" corresponds to the UPL output for a given
+ * set of input pin values. For example, if the input pins have the following
+ * values: A=1, B=1, C=1, D=0, then the UPL would output the value of bit 14
+ * (0b1110) in parameter "truthtable".
+ */
+int pm8058_upl_write_truthtable(struct pm8058_upl_device *upldev,
+				u16 truthtable)
+{
+	static const u16 regs[2] = {
+		SSBI_REG_UPL_TRUTHTABLE1,	/* low byte */
+		SSBI_REG_UPL_TRUTHTABLE2,	/* high byte */
+	};
+	u8 bytes[2];
+	int rc = 0;
+	int i;
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+	if (upldev->pm_chip == NULL)
+		return -ENODEV;
+
+	bytes[0] = truthtable & 0xFF;
+	bytes[1] = (truthtable >> 8) & 0xFF;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	/* write low byte, then high byte; stop on the first failure */
+	for (i = 0; i < 2; i++) {
+		rc = pm8058_write(upldev->pm_chip, regs[i], &bytes[i], 1);
+		if (rc) {
+			pr_err("%s: FAIL pm8058_write(0x%X)=0x%04X: rc=%d\n",
+				__func__, regs[i], bytes[i], rc);
+			break;
+		}
+	}
+
+	mutex_unlock(&upldev->upl_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_upl_write_truthtable);
+
+/*
+ * pm8058_upl_config - configure UPL I/O settings and UPL enable/disable
+ *
+ * @upldev: the UPL device
+ * @mask: setting mask to configure (only the low byte is used)
+ * @flags: setting flags (only the low byte is used)
+ *
+ * Read-modify-write of the single UPL_CTRL register: bits selected by
+ * @mask are replaced with the corresponding bits of @flags.
+ * Returns 0, -EINVAL, -ENODEV, or an SSBI error code.
+ */
+int pm8058_upl_config(struct pm8058_upl_device *upldev, u32 mask, u32 flags)
+{
+	int rc;
+	u8 upl_ctrl = 0, m, f;	/* init so a failed read logs 0, not junk */
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+	if (upldev->pm_chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_CTRL, &upl_ctrl, 1);
+	if (rc) {
+		pr_err("%s: FAIL pm8058_read(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc);
+		goto upl_config_done;
+	}
+
+	m = mask & 0x00ff;
+	f = flags & 0x00ff;
+	upl_ctrl &= ~m;		/* clear the selected bits ...    */
+	upl_ctrl |= m & f;	/* ... and set them from @flags   */
+
+	rc = pm8058_write(upldev->pm_chip, SSBI_REG_UPL_CTRL, &upl_ctrl, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_write(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc);
+upl_config_done:
+	mutex_unlock(&upldev->upl_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_upl_config);
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* debugfs "truthtable" write handler: program the 16-bit truth table. */
+static int truthtable_set(void *data, u64 val)
+{
+	int rc = pm8058_upl_write_truthtable(data, val);
+
+	if (rc)
+		pr_err("%s: pm8058_upl_write_truthtable: rc=%d, "
+			"truthtable=0x%llX\n", __func__, rc, val);
+	return rc;
+}
+
+/* debugfs "truthtable" read handler: report the current truth table. */
+static int truthtable_get(void *data, u64 *val)
+{
+	int rc;
+	u16 truthtable = 0;	/* init: not written on read failure */
+
+	rc = pm8058_upl_read_truthtable(data, &truthtable);
+	if (rc)
+		pr_err("%s: pm8058_upl_read_truthtable: rc=%d, "
+			"truthtable=0x%X\n", __func__, rc, truthtable);
+	if (val)
+		*val = truthtable;
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(upl_truthtable_fops, truthtable_get,
+			truthtable_set, "0x%04llX\n");
+
+/* enter values as 0xMMMMFFFF where MMMM is the mask and FFFF is the flags */
+static int control_set(void *data, u64 val)
+{
+	/* u16, matching the documented 0xMMMMFFFF encoding (the old u8
+	 * silently truncated; pm8058_upl_config() masks to the low byte
+	 * anyway, so hardware behavior is unchanged) */
+	u16 mask, flags;
+	int rc;
+
+	flags = val & 0xFFFF;
+	mask = (val >> 16) & 0xFFFF;
+
+	rc = pm8058_upl_config(data, mask, flags);
+	if (rc)
+		pr_err("%s: pm8058_upl_config: rc=%d, mask = 0x%X, "
+			"flags = 0x%X\n", __func__, rc, mask, flags);
+	return rc;
+}
+
+/* debugfs "control" read handler: report the raw UPL_CTRL register. */
+static int control_get(void *data, u64 *val)
+{
+	struct pm8058_upl_device *upldev = data;
+	int rc;
+	u8 ctrl = 0;	/* init: *val was stack junk when the read failed */
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8058_read(upldev->pm_chip, SSBI_REG_UPL_CTRL, &ctrl, 1);
+	if (rc)
+		pr_err("%s: FAIL pm8058_read(): rc=%d (ctrl=0x%02X)\n",
+			__func__, rc, ctrl);
+
+	mutex_unlock(&upldev->upl_mutex);
+
+	*val = ctrl;
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(upl_control_fops, control_get,
+			control_set, "0x%02llX\n");
+
+/*
+ * Create the debugfs nodes pm8058-upl/{truthtable,control}.
+ * Returns 0 or -ENOMEM; on partial failure the whole tree is removed.
+ */
+static int pm8058_upl_debug_init(struct pm8058_upl_device *upldev)
+{
+	struct dentry *dent;
+	struct dentry *temp;
+
+	dent = debugfs_create_dir("pm8058-upl", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		/* %p instead of (unsigned) cast: the cast truncated the
+		 * pointer on 64-bit builds */
+		pr_err("%s: ERR debugfs_create_dir: dent=%p\n",
+			__func__, dent);
+		return -ENOMEM;
+	}
+
+	temp = debugfs_create_file("truthtable", S_IRUSR | S_IWUSR, dent,
+			upldev, &upl_truthtable_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		/* log the failing dentry, not the parent dir */
+		pr_err("%s: ERR debugfs_create_file: dent=%p\n",
+			__func__, temp);
+		goto debug_error;
+	}
+
+	temp = debugfs_create_file("control", S_IRUSR | S_IWUSR, dent,
+			upldev, &upl_control_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=%p\n",
+			__func__, temp);
+		goto debug_error;
+	}
+
+	upldev->dent = dent;
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dent);
+	return -ENOMEM;
+}
+
+/* Tear down the whole pm8058-upl debugfs tree created at probe time. */
+static int __devexit pm8058_upl_debug_remove(struct pm8058_upl_device *upldev)
+{
+	debugfs_remove_recursive(upldev->dent);
+	return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Bind to the "pm8058-upl" child device created by the PM8058 core:
+ * allocate the singleton driver state and expose the debugfs nodes.
+ */
+static int __devinit pmic8058_upl_probe(struct platform_device *pdev)
+{
+	struct pm8058_chip *core;
+	struct pm8058_upl_device *upl;
+
+	core = dev_get_drvdata(pdev->dev.parent);
+	if (!core) {
+		pr_err("%s: no parent data passed in.\n", __func__);
+		return -EFAULT;
+	}
+
+	upl = kzalloc(sizeof(*upl), GFP_KERNEL);
+	if (!upl) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&upl->upl_mutex);
+	upl->pm_chip = core;
+
+	upl_dev = upl;
+	platform_set_drvdata(pdev, upl);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8058_upl_debug_init(upl_dev);
+#endif
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pmic8058_upl_remove(struct platform_device *pdev)
+{
+	struct pm8058_upl_device *upldev = platform_get_drvdata(pdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8058_upl_debug_remove(upldev);
+#endif
+
+	/* Invalidate the handle handed out by pm8058_upl_request(). */
+	upl_dev = NULL;
+	/* was: platform_set_drvdata(pdev, upldev->pm_chip), which left a
+	 * stale, type-confused pointer in drvdata */
+	platform_set_drvdata(pdev, NULL);
+	kfree(upldev);
+	pr_notice("%s: OK\n", __func__);
+
+	return 0;
+}
+
+/* Platform driver glue; matches the "pm8058-upl" child device. */
+static struct platform_driver pmic8058_upl_driver = {
+	.probe		= pmic8058_upl_probe,
+	.remove		= __devexit_p(pmic8058_upl_remove),
+	.driver		= {
+		.name = "pm8058-upl",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Plain module_init: no consumer ordering constraints like the PWM
+ * driver's subsys_initcall. */
+static int __init pm8058_upl_init(void)
+{
+	return platform_driver_register(&pmic8058_upl_driver);
+}
+
+static void __exit pm8058_upl_exit(void)
+{
+	platform_driver_unregister(&pmic8058_upl_driver);
+}
+
+module_init(pm8058_upl_init);
+module_exit(pm8058_upl_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 UPL driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058-upl");
diff --git a/drivers/misc/pmic8058-vibrator.c b/drivers/misc/pmic8058-vibrator.c
new file mode 100644
index 0000000..1b15b18
--- /dev/null
+++ b/drivers/misc/pmic8058-vibrator.c
@@ -0,0 +1,307 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pmic8058-vibrator.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "../staging/android/timed_output.h"
+
+#define VIB_DRV 0x4A
+
+#define VIB_DRV_SEL_MASK 0xf8
+#define VIB_DRV_SEL_SHIFT 0x03
+#define VIB_DRV_EN_MANUAL_MASK 0xfc
+
+#define VIB_MAX_LEVEL_mV 3100
+#define VIB_MIN_LEVEL_mV 1200
+
+/* Per-device vibrator state. */
+struct pmic8058_vib {
+	struct hrtimer vib_timer;	/* expiry queues a stop request */
+	struct timed_output_dev timed_dev;	/* android timed_output iface */
+	spinlock_t lock;		/* protects state + timer vs. enable() */
+	struct work_struct work;	/* deferred SSBI register write */
+
+	struct device *dev;
+	struct pmic8058_vibrator_pdata *pdata;
+	int state;			/* 1 = should vibrate, 0 = off */
+	int level;			/* drive level: pdata->level_mV / 100 */
+	u8 reg_vib_drv;			/* cached last-written VIB_DRV value */
+
+	struct pm8058_chip *pm_chip;	/* parent PMIC core handle */
+};
+
+/* REVISIT: just for debugging, will be removed in final working version */
+static void __dump_vib_regs(struct pmic8058_vib *vib, char *msg)
+{
+	u8 temp;
+
+	dev_dbg(vib->dev, "%s\n", msg);
+
+	/* was: return value ignored, printing an uninitialized byte when
+	 * the SSBI read failed */
+	if (pm8058_read(vib->pm_chip, VIB_DRV, &temp, 1) < 0)
+		return;
+	dev_dbg(vib->dev, "VIB_DRV - %X\n", temp);
+}
+
+/* Read one byte from a PM8058 register; warns on SSBI errors. */
+static int pmic8058_vib_read_u8(struct pmic8058_vib *vib,
+				 u8 *data, u16 reg)
+{
+	int ret = pm8058_read(vib->pm_chip, reg, data, 1);
+
+	if (ret < 0)
+		dev_warn(vib->dev, "Error reading pmic8058: %X - ret %X\n",
+				reg, ret);
+
+	return ret;
+}
+
+/* Write one byte to a PM8058 register; warns on SSBI errors. */
+static int pmic8058_vib_write_u8(struct pmic8058_vib *vib,
+				 u8 data, u16 reg)
+{
+	int ret = pm8058_write(vib->pm_chip, reg, &data, 1);
+
+	if (ret < 0)
+		dev_warn(vib->dev, "Error writing pmic8058: %X - ret %X\n",
+				reg, ret);
+
+	return ret;
+}
+
+/*
+ * Drive or stop the motor.
+ * @on: nonzero drives at vib->level, zero stops.
+ *
+ * Caches the last written VIB_DRV value in vib->reg_vib_drv so the
+ * level bits (VIB_DRV[7:3], per VIB_DRV_SEL_*) can be applied without
+ * a read-modify-write over SSBI.  Runs from the workqueue, not from
+ * the hrtimer/spinlock path.
+ */
+static int pmic8058_vib_set(struct pmic8058_vib *vib, int on)
+{
+	int rc;
+	u8 val;
+
+	if (on) {
+		/* resume before touching hardware */
+		rc = pm_runtime_resume(vib->dev);
+		if (rc < 0)
+			dev_dbg(vib->dev, "pm_runtime_resume failed\n");
+
+		val = vib->reg_vib_drv;
+		val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
+		rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+	} else {
+		val = vib->reg_vib_drv;
+		val &= ~VIB_DRV_SEL_MASK;	/* level 0 == motor off */
+		rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+
+		/* hardware is quiesced: allow runtime suspend */
+		rc = pm_runtime_suspend(vib->dev);
+		if (rc < 0)
+			dev_dbg(vib->dev, "pm_runtime_suspend failed\n");
+	}
+	__dump_vib_regs(vib, "vib_set_end");
+
+	return rc;
+}
+
+/*
+ * timed_output "enable" hook.
+ * @value: vibration duration in ms (clamped to pdata->max_timeout_ms);
+ *         0 cancels any running vibration.
+ *
+ * Only the state flag and the expiry hrtimer are updated under the
+ * spinlock; the actual SSBI register write is deferred to vib->work.
+ */
+static void pmic8058_vib_enable(struct timed_output_dev *dev, int value)
+{
+	struct pmic8058_vib *vib = container_of(dev, struct pmic8058_vib,
+					 timed_dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vib->lock, flags);
+	hrtimer_cancel(&vib->vib_timer);	/* restart the timeout window */
+
+	if (value == 0)
+		vib->state = 0;
+	else {
+		value = (value > vib->pdata->max_timeout_ms ?
+				 vib->pdata->max_timeout_ms : value);
+		vib->state = 1;
+		hrtimer_start(&vib->vib_timer,
+			      ktime_set(value / 1000, (value % 1000) * 1000000),
+			      HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&vib->lock, flags);
+	schedule_work(&vib->work);
+}
+
+/* Worker: apply the latest requested state to the hardware. */
+static void pmic8058_vib_update(struct work_struct *work)
+{
+	struct pmic8058_vib *vib;
+
+	vib = container_of(work, struct pmic8058_vib, work);
+	pmic8058_vib_set(vib, vib->state);
+}
+
+/* timed_output "get_time" hook: remaining vibration time in us, or 0. */
+static int pmic8058_vib_get_time(struct timed_output_dev *dev)
+{
+	struct pmic8058_vib *vib = container_of(dev, struct pmic8058_vib,
+							 timed_dev);
+	ktime_t remain;
+
+	if (!hrtimer_active(&vib->vib_timer))
+		return 0;
+
+	remain = hrtimer_get_remaining(&vib->vib_timer);
+	return (int) ktime_to_us(remain);
+}
+
+/*
+ * hrtimer expiry: flag the vibrator as idle and let the workqueue do
+ * the register write (the SSBI access is kept out of this context by
+ * design — see pmic8058_vib_update()).
+ */
+static enum hrtimer_restart pmic8058_vib_timer_func(struct hrtimer *timer)
+{
+	struct pmic8058_vib *vib;
+
+	vib = container_of(timer, struct pmic8058_vib, vib_timer);
+	vib->state = 0;
+	schedule_work(&vib->work);
+
+	return HRTIMER_NORESTART;
+}
+
+#ifdef CONFIG_PM
+/* System suspend: stop the timer/worker and quiesce the motor. */
+static int pmic8058_vib_suspend(struct device *dev)
+{
+	struct pmic8058_vib *vib = dev_get_drvdata(dev);
+
+	hrtimer_cancel(&vib->vib_timer);
+	cancel_work_sync(&vib->work);
+	/* turn-off vibrator */
+	pmic8058_vib_set(vib, 0);
+	return 0;
+}
+
+/* const: the dev_pm_ops table is never modified at runtime and the
+ * driver core takes a const pointer */
+static const struct dev_pm_ops pmic8058_vib_pm_ops = {
+	.suspend = pmic8058_vib_suspend,
+};
+#endif
+
+/*
+ * Probe: validate platform data, switch the VIB block to manual mode,
+ * register the android timed_output device and fire the optional
+ * initial vibration.
+ *
+ * Runtime PM: marked active up front so the pm_runtime_resume() in
+ * pmic8058_vib_set() succeeds during the initial vibrate, then set
+ * back to suspended at the end — presumably so the first real enable
+ * starts from a clean suspended state; TODO confirm intent.
+ */
+static int __devinit pmic8058_vib_probe(struct platform_device *pdev)
+
+{
+	struct pmic8058_vibrator_pdata *pdata = pdev->dev.platform_data;
+	struct pmic8058_vib *vib;
+	u8 val;
+	int rc;
+
+	struct pm8058_chip *pm_chip;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata)
+		return -EINVAL;
+
+	/* drive level must be within the hardware's supported range */
+	if (pdata->level_mV < VIB_MIN_LEVEL_mV ||
+			 pdata->level_mV > VIB_MAX_LEVEL_mV)
+		return -EINVAL;
+
+	vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+	if (!vib)
+		return -ENOMEM;
+
+	/* Enable runtime PM ops, start in ACTIVE mode */
+	rc = pm_runtime_set_active(&pdev->dev);
+	if (rc < 0)
+		dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
+	pm_runtime_enable(&pdev->dev);
+
+	vib->pm_chip	= pm_chip;
+	vib->pdata	= pdata;
+	vib->level	= pdata->level_mV / 100;	/* mV -> register units */
+	vib->dev	= &pdev->dev;
+
+	spin_lock_init(&vib->lock);
+	INIT_WORK(&vib->work, pmic8058_vib_update);
+
+	hrtimer_init(&vib->vib_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	vib->vib_timer.function = pmic8058_vib_timer_func;
+
+	vib->timed_dev.name = "vibrator";
+	vib->timed_dev.get_time = pmic8058_vib_get_time;
+	vib->timed_dev.enable = pmic8058_vib_enable;
+
+	__dump_vib_regs(vib, "boot_vib_default");
+
+	/* operate in manual mode */
+	rc = pmic8058_vib_read_u8(vib, &val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+	val &= ~VIB_DRV_EN_MANUAL_MASK;
+	rc = pmic8058_vib_write_u8(vib, val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+
+	vib->reg_vib_drv = val;		/* seed the register cache */
+
+	rc = timed_output_dev_register(&vib->timed_dev);
+	if (rc < 0)
+		goto err_read_vib;
+
+	/* optional boot-time buzz requested by the board file */
+	pmic8058_vib_enable(&vib->timed_dev, pdata->initial_vibrate_ms);
+
+	platform_set_drvdata(pdev, vib);
+
+	pm_runtime_set_suspended(&pdev->dev);
+	return 0;
+
+err_read_vib:
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(vib);
+	return rc;
+}
+
+/* Unbind: quiesce timer/worker, unregister the device, free state. */
+static int __devexit pmic8058_vib_remove(struct platform_device *pdev)
+{
+	struct pmic8058_vib *vib = platform_get_drvdata(pdev);
+
+	pm_runtime_disable(&pdev->dev);
+	cancel_work_sync(&vib->work);
+	hrtimer_cancel(&vib->vib_timer);
+	timed_output_dev_unregister(&vib->timed_dev);
+	/* clear drvdata before freeing, matching the sibling drivers */
+	platform_set_drvdata(pdev, NULL);
+	kfree(vib);
+
+	return 0;
+}
+
+/* Platform driver glue; matches the "pm8058-vib" child device. */
+static struct platform_driver pmic8058_vib_driver = {
+	.probe		= pmic8058_vib_probe,
+	.remove		= __devexit_p(pmic8058_vib_remove),
+	.driver		= {
+		.name	= "pm8058-vib",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &pmic8058_vib_pm_ops,
+#endif
+	},
+};
+
+/* Standard module registration boilerplate. */
+static int __init pmic8058_vib_init(void)
+{
+	return platform_driver_register(&pmic8058_vib_driver);
+}
+module_init(pmic8058_vib_init);
+
+static void __exit pmic8058_vib_exit(void)
+{
+	platform_driver_unregister(&pmic8058_vib_driver);
+}
+module_exit(pmic8058_vib_exit);
+
+MODULE_ALIAS("platform:pmic8058_vib");
+MODULE_DESCRIPTION("PMIC8058 vibrator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/pmic8058-xoadc.c b/drivers/misc/pmic8058-xoadc.c
new file mode 100644
index 0000000..d2d8cba
--- /dev/null
+++ b/drivers/misc/pmic8058-xoadc.c
@@ -0,0 +1,770 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/msm_adc.h>
+#include <linux/pmic8058-xoadc.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/delay.h>
+
+#include <mach/mpp.h>
+#include <mach/msm_xo.h>
+
+#define ADC_DRIVER_NAME "pm8058-xoadc"
+
+#define MAX_QUEUE_LENGTH 0X15
+#define MAX_CHANNEL_PROPERTIES_QUEUE 0X7
+#define MAX_QUEUE_SLOT 0x1
+
+/* User Processor */
+#define ADC_ARB_USRP_CNTRL 0x197
+ #define ADC_ARB_USRP_CNTRL_EN_ARB BIT(0)
+ #define ADC_ARB_USRP_CNTRL_RSV1 BIT(1)
+ #define ADC_ARB_USRP_CNTRL_RSV2 BIT(2)
+ #define ADC_ARB_USRP_CNTRL_RSV3 BIT(3)
+ #define ADC_ARB_USRP_CNTRL_RSV4 BIT(4)
+ #define ADC_ARB_USRP_CNTRL_RSV5 BIT(5)
+ #define ADC_ARB_USRP_CNTRL_EOC BIT(6)
+ #define ADC_ARB_USRP_CNTRL_REQ BIT(7)
+
+#define ADC_ARB_USRP_AMUX_CNTRL 0x198
+#define ADC_ARB_USRP_ANA_PARAM 0x199
+#define ADC_ARB_USRP_DIG_PARAM 0x19A
+#define ADC_ARB_USRP_RSV 0x19B
+
+#define ADC_ARB_USRP_DATA0 0x19D
+#define ADC_ARB_USRP_DATA1 0x19C
+
+/* Per-PMIC XOADC driver state. */
+struct pmic8058_adc {
+	struct xoadc_platform_data *pdata;
+	struct pm8058_chip *pm_chip;		/* parent PMIC core handle */
+	struct adc_properties *adc_prop;	/* resolution etc. */
+	struct xoadc_conv_state conv[2];
+	int xoadc_queue_count;			/* pending conversion requests */
+	int adc_irq;				/* end-of-conversion interrupt */
+	struct linear_graph *adc_graph;		/* [0] calibrated, [1] ideal */
+	struct xoadc_conv_state *conv_slot_request;	/* free-slot pool */
+	struct xoadc_conv_state *conv_queue_list;	/* pending-request queue */
+	struct adc_conv_slot conv_queue_elements[MAX_QUEUE_LENGTH];
+	int xoadc_num;
+	struct msm_xo_voter *adc_voter;		/* XO vote while converting */
+};
+
+/* One entry per supported PMIC instance. */
+static struct pmic8058_adc *pmic_adc[XOADC_PMIC_0 + 1];
+
+/* xoadc_calib_first_adc is set while a synchronous calibration read is
+ * in flight so the EOC paths bypass the request queue. */
+static bool xoadc_initialized, xoadc_calib_first_adc;
+
+DEFINE_RATELIMIT_STATE(pm8058_xoadc_msg_ratelimit,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+/* Rate-limit spurious-interrupt messages. */
+static inline int pm8058_xoadc_can_print(void)
+{
+	return __ratelimit(&pm8058_xoadc_msg_ratelimit);
+}
+
+/* Report whether the XOADC driver has finished probing. */
+int32_t pm8058_xoadc_registered(void)
+{
+	return xoadc_initialized ? 1 : 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_registered);
+
+/* Return a finished conversion slot to the free pool. */
+void pm8058_xoadc_restore_slot(uint32_t adc_instance,
+		struct adc_conv_slot *slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *free_pool = adc_pmic->conv_slot_request;
+
+	mutex_lock(&free_pool->list_lock);
+	list_add(&slot->list, &free_pool->slots);
+	mutex_unlock(&free_pool->list_lock);
+}
+EXPORT_SYMBOL(pm8058_xoadc_restore_slot);
+
+/* Take a conversion slot from the free pool; *slot is NULL when the
+ * pool is exhausted. */
+void pm8058_xoadc_slot_request(uint32_t adc_instance,
+		struct adc_conv_slot **slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *free_pool = adc_pmic->conv_slot_request;
+
+	*slot = NULL;
+
+	mutex_lock(&free_pool->list_lock);
+	if (!list_empty(&free_pool->slots)) {
+		*slot = list_first_entry(&free_pool->slots,
+				struct adc_conv_slot, list);
+		list_del(&(*slot)->list);
+	}
+	mutex_unlock(&free_pool->list_lock);
+}
+EXPORT_SYMBOL(pm8058_xoadc_slot_request);
+
+/*
+ * Enable or disable the user-processor ADC arbiter.
+ * @arb_cntrl: nonzero enables it (also votes the XO buffer on and
+ *             applies the board's MPP routing); zero disables it and
+ *             drops the XO vote.
+ *
+ * Returns 0 or a negative SSBI write error.
+ */
+static int32_t pm8058_xoadc_arb_cntrl(uint32_t arb_cntrl,
+					uint32_t adc_instance)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	int i, rc;
+	u8 data_arb_cntrl;
+
+	data_arb_cntrl = ADC_ARB_USRP_CNTRL_EOC |
+				ADC_ARB_USRP_CNTRL_RSV5 |
+				ADC_ARB_USRP_CNTRL_RSV4;
+
+	if (arb_cntrl) {
+		data_arb_cntrl |= ADC_ARB_USRP_CNTRL_EN_ARB;
+		msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_ON);
+		adc_pmic->pdata->xoadc_mpp_config();
+	}
+
+	/* Write twice to the CNTRL register for the arbiter settings
+	   to take into effect */
+	for (i = 0; i < 2; i++) {
+		rc = pm8058_write(adc_pmic->pm_chip, ADC_ARB_USRP_CNTRL,
+					&data_arb_cntrl, 1);
+		if (rc < 0) {
+			pr_debug("%s: PM8058 write failed\n", __func__);
+			return rc;
+		}
+	}
+
+	/* drop the XO vote only after the disable write succeeded */
+	if (!arb_cntrl)
+		msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_OFF);
+
+	return 0;
+}
+
+/*
+ * Program the user-processor arbiter for the conversion described by
+ * @slot and start it.  Sequence: enable arbiter (XO vote + MPP
+ * routing), select the AMUX channel and RSV input, program the
+ * analog/digital conversion parameters, unmask the EOC interrupt and
+ * finally write the CNTRL value (REQ bit set) to kick the conversion.
+ *
+ * NOTE(review): neither switch below has a default case; an unhandled
+ * chan_path/chan_adc_config value would leave data_amux_chan,
+ * data_arb_rsv, data_ana_param etc. uninitialized before they are
+ * written to hardware — confirm every enum value is covered, or add
+ * defaults that return -EINVAL.
+ */
+static int32_t pm8058_xoadc_configure(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	u8 data_arb_cntrl, data_amux_chan, data_arb_rsv, data_ana_param;
+	u8 data_dig_param, data_ana_param2;
+	int rc;
+
+	rc = pm8058_xoadc_arb_cntrl(1, adc_instance);
+	if (rc < 0) {
+		pr_debug("%s: Configuring ADC Arbiter"
+				"enable failed\n", __func__);
+		return rc;
+	}
+
+	/* Per-channel AMUX selection, RSV routing and scaling ratio
+	 * (gain numerator/denominator undo the channel's input divider;
+	 * all channels use the calibrated graph[0] except MUXOFF, which
+	 * uses the ideal graph[1]). */
+	switch (slot->chan_path) {
+
+	case CHAN_PATH_TYPE1:
+		data_amux_chan = CHANNEL_VCOIN << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE2:
+		data_amux_chan = CHANNEL_VBAT << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE3:
+		data_amux_chan = CHANNEL_VCHG << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 10;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE4:
+		data_amux_chan = CHANNEL_CHG_MONITOR << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE5:
+		data_amux_chan = CHANNEL_VPH_PWR << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE6:
+		data_amux_chan = CHANNEL_MPP5 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE7:
+		data_amux_chan = CHANNEL_MPP6 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE8:
+		data_amux_chan = CHANNEL_MPP7 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE9:
+		data_amux_chan = CHANNEL_MPP8 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE10:
+		data_amux_chan = CHANNEL_MPP9 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE11:
+		data_amux_chan = CHANNEL_USB_VBUS << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE12:
+		data_amux_chan = CHANNEL_DIE_TEMP << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE13:
+		data_amux_chan = CHANNEL_125V << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE14:
+		data_amux_chan = CHANNEL_INTERNAL_2 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE_NONE:
+		data_amux_chan = CHANNEL_MUXOFF << 4;
+		data_arb_rsv = 0x10;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[1];
+		break;
+
+	case CHAN_PATH_TYPE15:
+		data_amux_chan = CHANNEL_INTERNAL << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_AMUX_CNTRL, &data_amux_chan, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_RSV, &data_arb_rsv, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	/* Set default clock rate to 2.4 MHz XO ADC clock digital */
+	switch (slot->chan_adc_config) {
+
+	case ADC_CONFIG_TYPE1:
+		data_ana_param = 0xFE;
+		data_dig_param = 0x23;
+		data_ana_param2 = 0xFF;
+		/* AMUX register data to start the ADC conversion */
+		data_arb_cntrl = 0xF1;
+		break;
+
+	case ADC_CONFIG_TYPE2:
+		data_ana_param = 0xFE;
+		data_dig_param = 0x03;
+		data_ana_param2 = 0xFF;
+		/* AMUX register data to start the ADC conversion */
+		data_arb_cntrl = 0xF1;
+		break;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_ANA_PARAM, &data_ana_param, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_DIG_PARAM, &data_dig_param, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_ANA_PARAM, &data_ana_param2, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	/* re-arm the EOC interrupt that the ISR masked */
+	enable_irq(adc_pmic->adc_irq);
+
+	rc = pm8058_write(adc_pmic->pm_chip,
+			ADC_ARB_USRP_CNTRL, &data_arb_cntrl, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Queue one conversion request.  The first request of an idle queue
+ * powers the ADC rail (xoadc_vreg_set) and programs the hardware
+ * immediately; subsequent requests are started from the EOC path in
+ * pm8058_xoadc_read_adc_code().
+ *
+ * NOTE(review): the return value of pm8058_xoadc_configure() is
+ * ignored here; if it fails the slot stays queued but no EOC interrupt
+ * will ever dequeue it — confirm whether this path needs unwinding.
+ */
+int32_t pm8058_xoadc_select_chan_and_start_conv(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+
+	if (!xoadc_initialized)
+		return -ENODEV;
+
+	mutex_lock(&slot_state->list_lock);
+	list_add_tail(&slot->list, &slot_state->slots);
+	if (adc_pmic->xoadc_queue_count == 0) {
+		if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+			adc_pmic->pdata->xoadc_vreg_set(1);
+		pm8058_xoadc_configure(adc_instance, slot);
+	}
+	adc_pmic->xoadc_queue_count++;
+	mutex_unlock(&slot_state->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_select_chan_and_start_conv);
+
+/*
+ * Pop the oldest pending conversion request off the queue.
+ * Returns 0 with *slot set, or -EINVAL on a spurious EOC interrupt
+ * (nothing queued).
+ */
+static int32_t pm8058_xoadc_dequeue_slot_request(uint32_t adc_instance,
+		struct adc_conv_slot **slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *queue = adc_pmic->conv_queue_list;
+	bool found = false;
+
+	mutex_lock(&queue->list_lock);
+	if (adc_pmic->xoadc_queue_count > 0 &&
+			!list_empty(&queue->slots)) {
+		*slot = list_first_entry(&queue->slots,
+				struct adc_conv_slot, list);
+		list_del(&(*slot)->list);
+		found = true;
+	}
+	mutex_unlock(&queue->list_lock);
+
+	if (!found) {
+		if (pm8058_xoadc_can_print())
+			pr_err("Pmic 8058 xoadc spurious interrupt detected\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * pm8058_xoadc_read_adc_code - fetch the raw result of the last conversion
+ * @adc_instance: PMIC ADC instance index
+ * @data: out-parameter for the sign-extended raw ADC code
+ *
+ * Also starts the next queued conversion, or — when the queue drains —
+ * shuts the arbiter down and drops the vreg vote.
+ * Returns 0 on success or a negative SSBI error code.
+ */
+int32_t pm8058_xoadc_read_adc_code(uint32_t adc_instance, int32_t *data)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+	uint8_t rslt_lsb, rslt_msb;
+	struct adc_conv_slot *slot;
+	int32_t rc, max_ideal_adc_code = 1 << adc_pmic->adc_prop->bitresolution;
+
+	if (!xoadc_initialized)
+		return -ENODEV;
+
+	rc = pm8058_read(adc_pmic->pm_chip, ADC_ARB_USRP_DATA0, &rslt_lsb, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8058_read(adc_pmic->pm_chip, ADC_ARB_USRP_DATA1, &rslt_msb, 1);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+	/* Use the midpoint to determine underflow or overflow */
+	if (*data > max_ideal_adc_code + (max_ideal_adc_code >> 1))
+		*data |= ((1 << (8 * sizeof(*data) -
+			adc_pmic->adc_prop->bitresolution)) - 1) <<
+			adc_pmic->adc_prop->bitresolution;
+	/* Return if this is a calibration run since there
+	 * is no need to check requests in the waiting queue */
+	if (xoadc_calib_first_adc)
+		return 0;
+
+	mutex_lock(&slot_state->list_lock);
+	adc_pmic->xoadc_queue_count--;
+	if (adc_pmic->xoadc_queue_count > 0) {
+		slot = list_first_entry(&slot_state->slots,
+				struct adc_conv_slot, list);
+		pm8058_xoadc_configure(adc_instance, slot);
+	}
+	mutex_unlock(&slot_state->list_lock);
+
+	mutex_lock(&slot_state->list_lock);
+	/* Default value for switching off the arbiter after reading
+	   the ADC value. Bit 0 set to 0. */
+	if (adc_pmic->xoadc_queue_count == 0) {
+		rc = pm8058_xoadc_arb_cntrl(0, adc_instance);
+		if (rc < 0) {
+			pr_debug("%s: Configuring ADC Arbiter disable"
+						"failed\n", __func__);
+			/* was: returned with list_lock held, deadlocking
+			 * every later conversion request */
+			mutex_unlock(&slot_state->list_lock);
+			return rc;
+		}
+		if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+			adc_pmic->pdata->xoadc_vreg_set(0);
+	}
+	mutex_unlock(&slot_state->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_read_adc_code);
+
+/*
+ * End-of-conversion interrupt handler.  The IRQ is masked here and
+ * re-enabled just before the next conversion is started in
+ * pm8058_xoadc_configure().  Calibration conversions are read
+ * synchronously by pm8058_xoadc_calib_device(), so no slot is
+ * dequeued for them.
+ */
+static irqreturn_t pm8058_xoadc(int irq, void *dev_id)
+{
+	struct pmic8058_adc *xoadc_8058 = dev_id;
+	struct adc_conv_slot *slot = NULL;
+	int rc;
+
+	disable_irq_nosync(xoadc_8058->adc_irq);
+
+	if (xoadc_calib_first_adc)
+		return IRQ_HANDLED;
+
+	rc = pm8058_xoadc_dequeue_slot_request(xoadc_8058->xoadc_num, &slot);
+
+	if (rc < 0)
+		return IRQ_NONE;	/* spurious: nothing was queued */
+
+	if (rc == 0)
+		msm_adc_conv_cb(slot, 0, NULL, 0);
+
+	return IRQ_HANDLED;
+}
+
+/* Return the ADC properties (resolution etc.) of the given instance. */
+struct adc_properties *pm8058_xoadc_get_properties(uint32_t dev_instance)
+{
+	return pmic_adc[dev_instance]->adc_prop;
+}
+EXPORT_SYMBOL(pm8058_xoadc_get_properties);
+
+/*
+ * Two-point calibration against the internal 1.25 V (CHAN_PATH_TYPE13)
+ * and internal reference (CHAN_PATH_TYPE15) channels.  The resulting
+ * slope (Q10 fixed point) and offset are stored in adc_graph[0];
+ * adc_graph[1] keeps the ideal transfer function for therm readings.
+ *
+ * NOTE(review): on the error paths the slot taken via
+ * pm8058_xoadc_slot_request() is never returned with
+ * pm8058_xoadc_restore_slot(), and on success only the second of the
+ * two requested slots is restored — looks like a slot-pool leak;
+ * confirm against MAX_QUEUE_LENGTH sizing.
+ */
+int32_t pm8058_xoadc_calib_device(uint32_t adc_instance)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct adc_conv_slot *slot;
+	int rc, offset_xoadc, slope_xoadc, calib_read_1, calib_read_2;
+
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(1);
+
+	/* first point: internal 1.25 V reference */
+	pm8058_xoadc_slot_request(adc_instance, &slot);
+	if (slot) {
+		slot->chan_path = CHAN_PATH_TYPE13;
+		slot->chan_adc_config = ADC_CONFIG_TYPE2;
+		slot->chan_adc_calib = ADC_CONFIG_TYPE2;
+		/* bypass the request queue; result is read synchronously */
+		xoadc_calib_first_adc = true;
+		rc = pm8058_xoadc_configure(adc_instance, slot);
+		if (rc) {
+			pr_err("pm8058_xoadc configure failed\n");
+			goto fail;
+		}
+	} else {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	msleep(3);	/* wait for the conversion to complete */
+
+	rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_1);
+	if (rc) {
+		pr_err("pm8058_xoadc read adc failed\n");
+		xoadc_calib_first_adc = false;
+		goto fail;
+	}
+	xoadc_calib_first_adc = false;
+
+	/* second point: internal reference channel */
+	pm8058_xoadc_slot_request(adc_instance, &slot);
+	if (slot) {
+		slot->chan_path = CHAN_PATH_TYPE15;
+		slot->chan_adc_config = ADC_CONFIG_TYPE2;
+		slot->chan_adc_calib = ADC_CONFIG_TYPE2;
+		xoadc_calib_first_adc = true;
+		rc = pm8058_xoadc_configure(adc_instance, slot);
+		if (rc) {
+			pr_err("pm8058_xoadc configure failed\n");
+			goto fail;
+		}
+	} else {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	msleep(3);
+
+	rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_2);
+	if (rc) {
+		pr_err("pm8058_xoadc read adc failed\n");
+		xoadc_calib_first_adc = false;
+		goto fail;
+	}
+	xoadc_calib_first_adc = false;
+
+	pm8058_xoadc_restore_slot(adc_instance, slot);
+
+	/* slope in Q10 fixed point: codes per 0.625 V step */
+	slope_xoadc = (((calib_read_1 - calib_read_2) << 10)/
+					CHANNEL_ADC_625_MV);
+	offset_xoadc = calib_read_2 -
+			((slope_xoadc * CHANNEL_ADC_625_MV) >> 10);
+
+	printk(KERN_INFO"pmic8058_xoadc:The offset for AMUX calibration"
+						"was %d\n", offset_xoadc);
+
+	adc_pmic->adc_graph[0].offset = offset_xoadc;
+	adc_pmic->adc_graph[0].dy = (calib_read_1 - calib_read_2);
+	adc_pmic->adc_graph[0].dx = CHANNEL_ADC_625_MV;
+
+	/* Retain ideal calibration settings for therm readings */
+	adc_pmic->adc_graph[1].offset = 0 ;
+	adc_pmic->adc_graph[1].dy = (1 << 15) - 1;
+	adc_pmic->adc_graph[1].dx = 2200;
+
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(0);
+
+	return 0;
+fail:
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(0);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_xoadc_calib_device);
+
+/*
+ * Per-conversion calibration hook for the msm_adc framework.
+ * Device-level calibration is done once in pm8058_xoadc_calib_device(),
+ * so individual conversions never need recalibration here.
+ */
+int32_t pm8058_xoadc_calibrate(uint32_t dev_instance,
+			struct adc_conv_slot *slot, int *calib_status)
+{
+	*calib_status = CALIB_NOT_REQUIRED;
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_calibrate);
+
+/*
+ * Remove callback; also reused as the probe error-cleanup path.
+ *
+ * NOTE(review): adc_irq requested in probe is never freed here, and
+ * msm_xo_put() may be called with a NULL voter when cleanup runs before
+ * msm_xo_get() succeeded - confirm both against the msm_xo API.
+ */
+static int __devexit pm8058_xoadc_teardown(struct platform_device *pdev)
+{
+	struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev);
+
+	if (adc_pmic->pdata->xoadc_vreg_shutdown != NULL)
+		adc_pmic->pdata->xoadc_vreg_shutdown();
+
+	msm_xo_put(adc_pmic->adc_voter);
+	/* Restore the parent chip pointer as drvdata before freeing. */
+	platform_set_drvdata(pdev, adc_pmic->pm_chip);
+	device_init_wakeup(&pdev->dev, 0);
+	kfree(adc_pmic);
+	xoadc_initialized = false;
+
+	return 0;
+}
+
+/*
+ * Probe the PM8058 XOADC: allocate per-device state, initialize the
+ * conversion slot queues, hook the end-of-conversion IRQ and acquire
+ * the TCXO voter needed while conversions run.
+ *
+ * Fixes vs. previous revision:
+ *  - adc_pmic->pdata is now assigned before the first goto err_cleanup,
+ *    because pm8058_xoadc_teardown() dereferences it.
+ *  - rc is set from PTR_ERR() when msm_xo_get() fails; previously the
+ *    probe returned 0 (success) on that failure path.
+ */
+static int __devinit pm8058_xoadc_probe(struct platform_device *pdev)
+{
+	struct xoadc_platform_data *pdata = pdev->dev.platform_data;
+	struct pm8058_chip *pm_chip;
+	struct pmic8058_adc *adc_pmic;
+	int i, rc = 0;
+
+	pm_chip = dev_get_drvdata(pdev->dev.parent);
+	if (pm_chip == NULL) {
+		dev_err(&pdev->dev, "no parent data passed in\n");
+		return -EFAULT;
+	}
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	adc_pmic = kzalloc(sizeof(struct pmic8058_adc), GFP_KERNEL);
+	if (!adc_pmic) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_pmic->pm_chip = pm_chip;
+	adc_pmic->adc_prop = pdata->xoadc_prop;
+	adc_pmic->xoadc_num = pdata->xoadc_num;
+	adc_pmic->xoadc_queue_count = 0;
+	/* Must be set before any goto err_cleanup: teardown uses it. */
+	adc_pmic->pdata = pdata;
+
+	platform_set_drvdata(pdev, adc_pmic);
+
+	if (adc_pmic->xoadc_num > XOADC_PMIC_0) {
+		dev_err(&pdev->dev, "ADC device not supported\n");
+		rc = -EINVAL;
+		goto err_cleanup;
+	}
+
+	adc_pmic->adc_graph = kzalloc(sizeof(struct linear_graph)
+			* MAX_CHANNEL_PROPERTIES_QUEUE, GFP_KERNEL);
+	if (!adc_pmic->adc_graph) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		rc = -ENOMEM;
+		goto err_cleanup;
+	}
+
+	/* Will be replaced by individual channel calibration */
+	for (i = 0; i < MAX_CHANNEL_PROPERTIES_QUEUE; i++) {
+		adc_pmic->adc_graph[i].offset = 0 ;
+		adc_pmic->adc_graph[i].dy = (1 << 15) - 1;
+		adc_pmic->adc_graph[i].dx = 2200;
+	}
+
+	if (pdata->xoadc_mpp_config != NULL)
+		pdata->xoadc_mpp_config();
+
+	/* conv[0] holds the free-slot pool ... */
+	adc_pmic->conv_slot_request = &adc_pmic->conv[0];
+	adc_pmic->conv_slot_request->context =
+		&adc_pmic->conv_queue_elements[0];
+
+	mutex_init(&adc_pmic->conv_slot_request->list_lock);
+	INIT_LIST_HEAD(&adc_pmic->conv_slot_request->slots);
+
+	/* tie each slot and initwork them */
+	for (i = 0; i < MAX_QUEUE_LENGTH; i++) {
+		list_add(&adc_pmic->conv_slot_request->context[i].list,
+					&adc_pmic->conv_slot_request->slots);
+		INIT_WORK(&adc_pmic->conv_slot_request->context[i].work,
+							msm_adc_wq_work);
+		init_completion(&adc_pmic->conv_slot_request->context[i].comp);
+		adc_pmic->conv_slot_request->context[i].idx = i;
+	}
+
+	/* ... conv[1] holds the queue of pending conversions. */
+	adc_pmic->conv_queue_list = &adc_pmic->conv[1];
+
+	mutex_init(&adc_pmic->conv_queue_list->list_lock);
+	INIT_LIST_HEAD(&adc_pmic->conv_queue_list->slots);
+
+	adc_pmic->adc_irq = platform_get_irq(pdev, 0);
+	if (adc_pmic->adc_irq < 0) {
+		rc = -ENXIO;
+		goto err_cleanup;
+	}
+
+	rc = request_threaded_irq(adc_pmic->adc_irq,
+				NULL, pm8058_xoadc,
+		IRQF_TRIGGER_RISING, "pm8058_adc_interrupt", adc_pmic);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request adc irq\n");
+		goto err_cleanup;
+	}
+
+	/* Kept disabled until the first conversion is started. */
+	disable_irq(adc_pmic->adc_irq);
+
+	device_init_wakeup(&pdev->dev, pdata->xoadc_wakeup);
+
+	if (adc_pmic->adc_voter == NULL) {
+		adc_pmic->adc_voter = msm_xo_get(MSM_XO_TCXO_D1,
+							"pmic8058_xoadc");
+		if (IS_ERR(adc_pmic->adc_voter)) {
+			dev_err(&pdev->dev, "Failed to get XO vote\n");
+			/* Propagate the error; previously rc stayed 0. */
+			rc = PTR_ERR(adc_pmic->adc_voter);
+			goto err_cleanup;
+		}
+	}
+
+	pmic_adc[adc_pmic->xoadc_num] = adc_pmic;
+
+	if (pdata->xoadc_vreg_setup != NULL)
+		pdata->xoadc_vreg_setup();
+
+	xoadc_initialized = true;
+	xoadc_calib_first_adc = false;
+
+	return 0;
+
+err_cleanup:
+	pm8058_xoadc_teardown(pdev);
+
+	return rc;
+}
+
+/* Platform driver glue; matched against the "pm8058-xoadc" device. */
+static struct platform_driver pm8058_xoadc_driver = {
+	.probe = pm8058_xoadc_probe,
+	.remove = __devexit_p(pm8058_xoadc_teardown),
+	.driver = {
+		.name = "pm8058-xoadc",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_xoadc_init(void)
+{
+	return platform_driver_register(&pm8058_xoadc_driver);
+}
+module_init(pm8058_xoadc_init);
+
+static void __exit pm8058_xoadc_exit(void)
+{
+	platform_driver_unregister(&pm8058_xoadc_driver);
+}
+module_exit(pm8058_xoadc_exit);
+
+MODULE_ALIAS("platform:pmic8058_xoadc");
+MODULE_DESCRIPTION("PMIC8058 XOADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/qfp_fuse.c b/drivers/misc/qfp_fuse.c
new file mode 100644
index 0000000..341e5b2
--- /dev/null
+++ b/drivers/misc/qfp_fuse.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/qfp_fuse.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+
+/*
+ * Time QFPROM requires to reliably burn a fuse.
+ */
+#define QFPROM_BLOW_TIMEOUT_US 10
+#define QFPROM_BLOW_TIMER_OFFSET 0x2038
+/*
+ * Denotes number of cycles required to blow the fuse.
+ */
+#define QFPROM_BLOW_TIMER_VALUE (QFPROM_BLOW_TIMEOUT_US * 83)
+
+#define QFPROM_BLOW_STATUS_OFFSET 0x204C
+#define QFPROM_BLOW_STATUS_BUSY 0x01
+#define QFPROM_BLOW_STATUS_ERROR 0x02
+
+#define QFP_FUSE_READY 0x01
+#define QFP_FUSE_OFF 0x00
+
+/* Per-device state for the QFPROM fuse driver (single instance). */
+struct qfp_priv_t {
+	uint32_t base;			/* ioremapped start of QFPROM region */
+	uint32_t end;			/* last valid address in the region */
+	struct mutex lock;		/* serializes fuse read/write ioctls */
+	struct regulator *fuse_vdd;	/* supply enabled only while blowing */
+	u8 state;			/* QFP_FUSE_READY / QFP_FUSE_OFF */
+};
+
+/* We need only one instance of this for the driver */
+static struct qfp_priv_t *qfp_priv;
+
+
+/* Open: only succeeds once the platform device has been probed. */
+static int qfp_fuse_open(struct inode *inode, struct file *filp)
+{
+	if (!qfp_priv)
+		return -ENODEV;
+
+	filp->private_data = qfp_priv;
+	return 0;
+}
+
+/* Release: drop the private-data reference stashed at open time. */
+static int qfp_fuse_release(struct inode *inode, struct file *filp)
+{
+	filp->private_data = NULL;
+	return 0;
+}
+
+/*
+ * Poll the blow-status register until the BUSY bit clears.
+ * Returns 0 on completion, -ETIMEDOUT otherwise; *status always holds
+ * the last value read so the caller can inspect the error bits.
+ */
+static inline int qfp_fuse_wait_for_fuse_blow(u32 *status)
+{
+	u32 tries;
+
+	/* wait for 400us before checking for the first time */
+	udelay(400);
+
+	for (tries = QFPROM_BLOW_TIMEOUT_US; tries != 0; tries--) {
+		*status = readl_relaxed(
+			qfp_priv->base + QFPROM_BLOW_STATUS_OFFSET);
+		if (!(*status & QFPROM_BLOW_STATUS_BUSY))
+			return 0;
+		udelay(1);
+	}
+
+	pr_err("Timeout waiting for FUSE blow, status = %x\n", *status);
+	return -ETIMEDOUT;
+}
+
+/* Turn on the fuse-blow supply; logs and returns any failure. */
+static inline int qfp_fuse_enable_regulator(void)
+{
+	int rc = regulator_enable(qfp_priv->fuse_vdd);
+
+	if (rc)
+		pr_err("Error (%d) enabling regulator\n", rc);
+
+	return rc;
+}
+
+/* Turn off the fuse-blow supply; logs and returns any failure. */
+static inline int qfp_fuse_disable_regulator(void)
+{
+	int rc = regulator_disable(qfp_priv->fuse_vdd);
+
+	if (rc)
+		pr_err("Error (%d) disabling regulator\n", rc);
+
+	return rc;
+}
+
+/*
+ * Blow one 32-bit fuse word at @addr with @data.
+ *
+ * Sequence: program the blow timer, raise the supply, write the word,
+ * poll for completion, drop the supply, then read back and verify.
+ * Returns 0 on success, -ETIMEDOUT or -EFAULT on failure.
+ * Caller must hold qfp_priv->lock.
+ */
+static int qfp_fuse_write_word(u32 *addr, u32 data)
+{
+	u32 blow_status = 0;
+	u32 read_data;
+	int err;
+
+	/* Set QFPROM blow timer register */
+	writel_relaxed(QFPROM_BLOW_TIMER_VALUE,
+			qfp_priv->base + QFPROM_BLOW_TIMER_OFFSET);
+	mb();
+
+	/* Enable LVS0 regulator */
+	err = qfp_fuse_enable_regulator();
+	if (err != 0)
+		return err;
+
+	/*
+	 * Wait for about 1ms. However msleep(1) can sleep for
+	 * up to 20ms as per Documentation/timers/timers-howto.txt.
+	 * Time is not a constraint here.
+	 */
+
+	msleep(20);
+
+	/* Write data */
+	__raw_writel(data, addr);
+	mb();
+
+	/* blow_status = QFPROM_BLOW_STATUS_BUSY; */
+	err = qfp_fuse_wait_for_fuse_blow(&blow_status);
+	if (err) {
+		qfp_fuse_disable_regulator();
+		return err;
+	}
+
+	/* Check error status */
+	if (blow_status & QFPROM_BLOW_STATUS_ERROR) {
+		pr_err("Fuse blow status error: %d\n", blow_status);
+		qfp_fuse_disable_regulator();
+		return -EFAULT;
+	}
+
+	/* Disable regulator */
+	qfp_fuse_disable_regulator();
+	/*
+	 * Wait for about 1ms. However msleep(1) can sleep for
+	 * up to 20ms as per Documentation/timers/timers-howto.txt.
+	 * Time is not a constraint here.
+	 */
+	msleep(20);
+
+	/* Verify written data */
+	read_data = readl_relaxed(addr);
+	if (read_data != data) {
+		pr_err("Error: read/write data mismatch\n");
+		pr_err("Address = %p written data = %x read data = %x\n",
+				addr, data, read_data);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * ioctl entry point: QFP_FUSE_IOC_READ copies a range of fuse words to
+ * user space; QFP_FUSE_IOC_WRITE blows fuse words from a user buffer.
+ *
+ * req.size and req.offset come from user space and are untrusted:
+ * besides the existing range check, the size is now bounded so that
+ * req.size * 4 cannot overflow and produce an undersized allocation.
+ */
+static long
+qfp_fuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qfp_fuse_req req;
+	u32 *buf = NULL;
+	int i;
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QFP_FUSE_IOC_MAGIC)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case QFP_FUSE_IOC_READ:
+		if (arg == 0) {
+			pr_err("user space arg not supplied\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			pr_err("Error copying req from user space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Check for limits */
+		if (!req.size) {
+			pr_err("Request size zero.\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Reject sizes whose byte count would overflow. */
+		if (req.size > (UINT_MAX / 4)) {
+			pr_err("Request size too large.\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
+				qfp_priv->end) {
+			pr_err("Req size exceeds QFPROM addr space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Allocate memory for buffer */
+		buf = kzalloc(req.size * 4, GFP_KERNEL);
+		if (buf == NULL) {
+			pr_alert("No memory for data\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&qfp_priv->lock)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		/* Read data */
+		for (i = 0; i < req.size; i++)
+			buf[i] = readl_relaxed(
+				((u32 *) (qfp_priv->base + req.offset)) + i);
+
+		if (copy_to_user((void __user *)req.data, buf, 4*(req.size))) {
+			pr_err("Error copying to user space\n");
+			err = -EFAULT;
+		}
+
+		mutex_unlock(&qfp_priv->lock);
+		break;
+
+	case QFP_FUSE_IOC_WRITE:
+		if (arg == 0) {
+			pr_err("user space arg not supplied\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			pr_err("Error copying req from user space\n");
+			err = -EFAULT;
+			break;
+		}
+		/* Check for limits */
+		if (!req.size) {
+			pr_err("Request size zero.\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Reject sizes whose byte count would overflow. */
+		if (req.size > (UINT_MAX / 4)) {
+			pr_err("Request size too large.\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
+				qfp_priv->end) {
+			pr_err("Req size exceeds QFPROM space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Allocate memory for buffer */
+		buf = kzalloc(4 * (req.size), GFP_KERNEL);
+		if (buf == NULL) {
+			pr_alert("No memory for data\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		/* Copy user data to local buffer */
+		if (copy_from_user(buf, (void __user *)req.data,
+				4 * (req.size))) {
+			pr_err("Error copying data from user space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&qfp_priv->lock)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		/* Write data word at a time; stop on first failure. */
+		for (i = 0; i < req.size && !err; i++) {
+			err = qfp_fuse_write_word(((u32 *) (
+				qfp_priv->base + req.offset) + i), buf[i]);
+		}
+
+		mutex_unlock(&qfp_priv->lock);
+		break;
+	default:
+		pr_err("Invalid ioctl command.\n");
+		return -ENOTTY;
+	}
+	/* kfree(NULL) is a no-op, so the early-break paths are safe. */
+	kfree(buf);
+	return err;
+}
+
+/* File operations exposed through the misc char device below. */
+static const struct file_operations qfp_fuse_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qfp_fuse_ioctl,
+	.open = qfp_fuse_open,
+	.release = qfp_fuse_release
+};
+
+/* Registered as /dev/qfpfuse with a dynamically assigned minor. */
+static struct miscdevice qfp_fuse_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "qfpfuse",
+	.fops = &qfp_fuse_fops
+};
+
+
+/*
+ * Probe: record the (already ioremapped) QFPROM address range, obtain
+ * the fuse-blow regulator named in platform data, and register the
+ * misc char device.
+ *
+ * NOTE(review): res->start/end are stored in 32-bit fields; the comment
+ * below says a remapped address is passed in - confirm this holds on
+ * any 64-bit target before reuse.
+ */
+static int qfp_fuse_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	const char *regulator_name = pdev->dev.platform_data;
+
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!regulator_name)
+		return -EINVAL;
+
+	/* Initialize */
+	qfp_priv = kzalloc(sizeof(struct qfp_priv_t), GFP_KERNEL);
+
+	if (qfp_priv == NULL) {
+		pr_alert("Not enough memory to initialize device\n");
+		return -ENOMEM;
+	}
+
+	/* The driver is passed ioremapped address */
+	qfp_priv->base = res->start;
+	qfp_priv->end = res->end;
+
+	/* Get regulator for QFPROM writes */
+	qfp_priv->fuse_vdd = regulator_get(NULL, regulator_name);
+	if (IS_ERR(qfp_priv->fuse_vdd)) {
+		ret = PTR_ERR(qfp_priv->fuse_vdd);
+		pr_err("Err (%d) getting %s\n", ret, regulator_name);
+		/* Cleared so the err: path doesn't regulator_put() it. */
+		qfp_priv->fuse_vdd = NULL;
+		goto err;
+	}
+
+	mutex_init(&qfp_priv->lock);
+
+	ret = misc_register(&qfp_fuse_dev);
+	if (ret < 0)
+		goto err;
+
+	pr_info("Fuse driver base:%x end:%x\n", qfp_priv->base, qfp_priv->end);
+	return 0;
+
+err:
+	if (qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	return ret;
+
+}
+
+/*
+ * Remove: unregister the char device *before* freeing qfp_priv.
+ * The previous order freed the private state while the device node was
+ * still registered, so a concurrent open()/ioctl() could race against
+ * freed memory.
+ */
+static int __devexit qfp_fuse_remove(struct platform_device *plat)
+{
+	misc_deregister(&qfp_fuse_dev);
+
+	if (qfp_priv && qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	pr_info("Removing Fuse driver\n");
+	return 0;
+}
+
+/*
+ * Platform driver glue.  qfp_fuse_remove is marked __devexit, so it
+ * must be referenced via __devexit_p(): without it the pointer dangles
+ * when CONFIG_HOTPLUG=n discards __devexit sections.
+ */
+static struct platform_driver qfp_fuse_driver = {
+	.probe = qfp_fuse_probe,
+	.remove = __devexit_p(qfp_fuse_remove),
+	.driver = {
+		.name = "qfp_fuse_driver",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Module entry/exit: register/unregister the platform driver. */
+static int __init qfp_fuse_init(void)
+{
+	return platform_driver_register(&qfp_fuse_driver);
+}
+
+static void __exit qfp_fuse_exit(void)
+{
+	platform_driver_unregister(&qfp_fuse_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Driver to read/write to QFPROM fuses.");
+MODULE_VERSION("1.01");
+
+module_init(qfp_fuse_init);
+module_exit(qfp_fuse_exit);
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
new file mode 100644
index 0000000..53d4ef2
--- /dev/null
+++ b/drivers/misc/tsif.c
@@ -0,0 +1,1564 @@
+/*
+ * TSIF Driver
+ *
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h> /* Needed by all modules */
+#include <linux/kernel.h> /* Needed for KERN_INFO */
+#include <linux/init.h> /* Needed for the macros */
+#include <linux/err.h> /* IS_ERR etc. */
+#include <linux/platform_device.h>
+
+#include <linux/ioport.h> /* XXX_mem_region */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h> /* dma_XXX */
+#include <linux/delay.h> /* msleep */
+
+#include <linux/io.h> /* ioXXX */
+#include <linux/uaccess.h> /* copy_from_user */
+#include <linux/clk.h>
+#include <linux/wakelock.h>
+#include <linux/tsif_api.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h> /* kfree, kzalloc */
+
+#include <mach/gpio.h>
+#include <mach/dma.h>
+#include <mach/msm_tsif.h>
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF (0x0)
+#define TSIF_TIME_LIMIT_OFF (0x4)
+#define TSIF_CLK_REF_OFF (0x8)
+#define TSIF_LPBK_FLAGS_OFF (0xc)
+#define TSIF_LPBK_DATA_OFF (0x10)
+#define TSIF_TEST_CTL_OFF (0x14)
+#define TSIF_TEST_MODE_OFF (0x18)
+#define TSIF_TEST_RESET_OFF (0x1c)
+#define TSIF_TEST_EXPORT_OFF (0x20)
+#define TSIF_TEST_CURRENT_OFF (0x24)
+
+#define TSIF_DATA_PORT_OFF (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ (1 << 28)
+#define TSIF_STS_CTL_PACK_AVAIL (1 << 27)
+#define TSIF_STS_CTL_1ST_PACKET (1 << 26)
+#define TSIF_STS_CTL_OVERFLOW (1 << 25)
+#define TSIF_STS_CTL_LOST_SYNC (1 << 24)
+#define TSIF_STS_CTL_TIMEOUT (1 << 23)
+#define TSIF_STS_CTL_INV_SYNC (1 << 21)
+#define TSIF_STS_CTL_INV_NULL (1 << 20)
+#define TSIF_STS_CTL_INV_ERROR (1 << 19)
+#define TSIF_STS_CTL_INV_ENABLE (1 << 18)
+#define TSIF_STS_CTL_INV_DATA (1 << 17)
+#define TSIF_STS_CTL_INV_CLOCK (1 << 16)
+#define TSIF_STS_CTL_SPARE (1 << 15)
+#define TSIF_STS_CTL_EN_NULL (1 << 11)
+#define TSIF_STS_CTL_EN_ERROR (1 << 10)
+#define TSIF_STS_CTL_LAST_BIT (1 << 9)
+#define TSIF_STS_CTL_EN_TIME_LIM (1 << 8)
+#define TSIF_STS_CTL_EN_TCR (1 << 7)
+#define TSIF_STS_CTL_TEST_MODE (3 << 5)
+#define TSIF_STS_CTL_EN_DM (1 << 4)
+#define TSIF_STS_CTL_STOP (1 << 3)
+#define TSIF_STS_CTL_START (1 << 0)
+
+/*
+ * Data buffering parameters
+ *
+ * Data stored in cyclic buffer;
+ *
+ * Data organized in chunks of packets.
+ * One chunk processed at a time by the data mover
+ *
+ */
+#define TSIF_PKTS_IN_CHUNK_DEFAULT (16) /**< packets in one DM chunk */
+#define TSIF_CHUNKS_IN_BUF_DEFAULT (8)
+#define TSIF_PKTS_IN_CHUNK (tsif_device->pkts_per_chunk)
+#define TSIF_CHUNKS_IN_BUF (tsif_device->chunks_per_buf)
+#define TSIF_PKTS_IN_BUF (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
+#define TSIF_BUF_SIZE (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
+
+#define ROW_RESET (MSM_CLK_CTL_BASE + 0x214)
+#define GLBL_CLK_ENA (MSM_CLK_CTL_BASE + 0x000)
+#define CLK_HALT_STATEB (MSM_CLK_CTL_BASE + 0x104)
+#define TSIF_NS_REG (MSM_CLK_CTL_BASE + 0x0b4)
+#define TV_NS_REG (MSM_CLK_CTL_BASE + 0x0bc)
+
+/* used to create debugfs entries */
+/* Register table driving debugfs file creation: name, mode, offset. */
+static const struct {
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_tsif_regs[] = {
+	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},
+	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
+	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
+};
+
+/* structures for Data Mover */
+struct tsif_dmov_cmd {
+ dmov_box box;
+ dma_addr_t box_ptr;
+};
+
+struct msm_tsif_device;
+
+struct tsif_xfer {
+ struct msm_dmov_cmd hdr;
+ struct msm_tsif_device *tsif_device;
+ int busy;
+ int wi; /**< set devices's write index after xfer */
+};
+
+struct msm_tsif_device {
+ struct list_head devlist;
+ struct platform_device *pdev;
+ struct resource *memres;
+ void __iomem *base;
+ unsigned int irq;
+ int mode;
+ u32 time_limit;
+ enum tsif_state state;
+ struct wake_lock wake_lock;
+ /* clocks */
+ struct clk *tsif_clk;
+ struct clk *tsif_pclk;
+ struct clk *tsif_ref_clk;
+ /* debugfs */
+ struct dentry *dent_tsif;
+ struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+ struct dentry *debugfs_gpio;
+ struct dentry *debugfs_action;
+ struct dentry *debugfs_dma;
+ struct dentry *debugfs_databuf;
+ struct debugfs_blob_wrapper blob_wrapper_databuf;
+ /* DMA related */
+ int dma;
+ int crci;
+ void *data_buffer;
+ dma_addr_t data_buffer_dma;
+ u32 pkts_per_chunk;
+ u32 chunks_per_buf;
+ int ri;
+ int wi;
+ int dmwi; /**< DataMover write index */
+ struct tsif_dmov_cmd *dmov_cmd[2];
+ dma_addr_t dmov_cmd_dma[2];
+ struct tsif_xfer xfer[2];
+ struct tasklet_struct dma_refill;
+ /* statistics */
+ u32 stat_rx;
+ u32 stat_overflow;
+ u32 stat_lost_sync;
+ u32 stat_timeout;
+ u32 stat_dmov_err;
+ u32 stat_soft_drop;
+ int stat_ifi; /* inter frame interval */
+ u32 stat0, stat1;
+ /* client */
+ void *client_data;
+ void (*client_notify)(void *client_data);
+};
+
+/* ===clocks begin=== */
+
+/* Release every clock handle obtained by tsif_get_clocks(). */
+static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
+{
+	struct clk **handles[] = {
+		&tsif_device->tsif_clk,
+		&tsif_device->tsif_pclk,
+		&tsif_device->tsif_ref_clk,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(handles); i++) {
+		if (*handles[i]) {
+			clk_put(*handles[i]);
+			*handles[i] = NULL;
+		}
+	}
+}
+
+/*
+ * Acquire the clocks named in platform data.  Each name is optional:
+ * a NULL name leaves the corresponding handle NULL.  On any failure
+ * all previously acquired clocks are released.
+ */
+static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	int rc = 0;
+
+	if (pdata->tsif_clk) {
+		tsif_device->tsif_clk = clk_get(NULL, pdata->tsif_clk);
+		if (IS_ERR(tsif_device->tsif_clk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_clk);
+			rc = PTR_ERR(tsif_device->tsif_clk);
+			tsif_device->tsif_clk = NULL;
+			goto ret;
+		}
+	}
+	if (pdata->tsif_pclk) {
+		tsif_device->tsif_pclk = clk_get(NULL, pdata->tsif_pclk);
+		if (IS_ERR(tsif_device->tsif_pclk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_pclk);
+			rc = PTR_ERR(tsif_device->tsif_pclk);
+			tsif_device->tsif_pclk = NULL;
+			goto ret;
+		}
+	}
+	if (pdata->tsif_ref_clk) {
+		tsif_device->tsif_ref_clk = clk_get(NULL, pdata->tsif_ref_clk);
+		if (IS_ERR(tsif_device->tsif_ref_clk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_ref_clk);
+			rc = PTR_ERR(tsif_device->tsif_ref_clk);
+			tsif_device->tsif_ref_clk = NULL;
+			goto ret;
+		}
+	}
+	return 0;
+ret:
+	tsif_put_clocks(tsif_device);
+	return rc;
+}
+
+/*
+ * Gate all TSIF clocks on or off.
+ *
+ * Fix: tsif_ref_clk is now NULL-checked like the other two handles.
+ * tsif_get_clocks() leaves any handle NULL when platform data omits
+ * its name, so the previous unguarded clk_enable/clk_disable on the
+ * ref clock could dereference NULL.
+ */
+static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
+{
+	if (on) {
+		if (tsif_device->tsif_clk)
+			clk_enable(tsif_device->tsif_clk);
+		if (tsif_device->tsif_pclk)
+			clk_enable(tsif_device->tsif_pclk);
+		if (tsif_device->tsif_ref_clk)
+			clk_enable(tsif_device->tsif_ref_clk);
+	} else {
+		if (tsif_device->tsif_clk)
+			clk_disable(tsif_device->tsif_clk);
+		if (tsif_device->tsif_pclk)
+			clk_disable(tsif_device->tsif_pclk);
+		if (tsif_device->tsif_ref_clk)
+			clk_disable(tsif_device->tsif_ref_clk);
+	}
+}
+/* ===clocks end=== */
+/* ===gpio begin=== */
+
+/* Free the first @size GPIOs of @table, in reverse request order. */
+static void tsif_gpios_free(const struct msm_gpio *table, int size)
+{
+	int n;
+
+	for (n = size - 1; n >= 0; n--)
+		gpio_free(GPIO_PIN(table[n].gpio_cfg));
+}
+
+/*
+ * Request every GPIO in @table.  On failure, GPIOs already requested
+ * are released and the gpio_request() error code is returned.
+ */
+static int tsif_gpios_request(const struct msm_gpio *table, int size)
+{
+	int n, rc;
+
+	for (n = 0; n < size; n++) {
+		const struct msm_gpio *g = &table[n];
+
+		rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
+		if (rc) {
+			pr_err("gpio_request(%d) <%s> failed: %d\n",
+			       GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
+			tsif_gpios_free(table, n);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Put every GPIO in @table back into its disabled TLMM configuration.
+ * All entries are attempted; the first error code seen is returned.
+ *
+ * Fix: the failure message now prints @tmp (the actual error from
+ * gpio_tlmm_config) instead of @rc, which was still 0 (or a stale
+ * earlier error) at that point.
+ */
+static int tsif_gpios_disable(const struct msm_gpio *table, int size)
+{
+	int rc = 0;
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		int tmp;
+		g = table + i;
+		tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
+		if (tmp) {
+			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", tmp);
+			pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			/* remember the first failure only */
+			if (!rc)
+				rc = tmp;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Apply the enabled TLMM configuration to every GPIO in @table.
+ * On failure, entries already enabled are disabled again and the
+ * gpio_tlmm_config() error code is returned.
+ */
+static int tsif_gpios_enable(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
+		if (rc) {
+			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", rc);
+			pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			goto err;
+		}
+	}
+	return 0;
+err:
+	/* roll back only the entries enabled so far */
+	tsif_gpios_disable(table, i);
+	return rc;
+}
+
+/* Request then enable a GPIO table; frees everything on enable failure. */
+static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
+{
+	int rc;
+
+	rc = tsif_gpios_request(table, size);
+	if (rc)
+		return rc;
+
+	rc = tsif_gpios_enable(table, size);
+	if (rc)
+		tsif_gpios_free(table, size);
+
+	return rc;
+}
+
+/* Disable and then free a GPIO table (reverse of request_enable). */
+static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
+{
+	tsif_gpios_disable(table, size);
+	tsif_gpios_free(table, size);
+}
+
+/* Claim and enable the TSIF pins listed in platform data. */
+static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
+}
+
+/* Disable and release the TSIF pins listed in platform data. */
+static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
+}
+
+/* ===gpio end=== */
+
+/*
+ * Program and start the TSIF core for the configured mode.
+ * Mode 3 (manual) leaves the registers to debugfs and returns 0.
+ * Returns 0 when the START bit sticks, -EFAULT if it does not,
+ * -EINVAL for an unknown mode.
+ *
+ * Fix: removed the unreachable `break;` that followed `return 0;`
+ * in the mode-3 case.
+ */
+static int tsif_start_hw(struct msm_tsif_device *tsif_device)
+{
+	u32 ctl = TSIF_STS_CTL_EN_IRQ |
+		  TSIF_STS_CTL_EN_TIME_LIM |
+		  TSIF_STS_CTL_EN_TCR |
+		  TSIF_STS_CTL_EN_DM;
+	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
+	switch (tsif_device->mode) {
+	case 1: /* mode 1 */
+		ctl |= (0 << 5);
+		break;
+	case 2: /* mode 2 */
+		ctl |= (1 << 5);
+		break;
+	case 3: /* manual - control from debugfs */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+	iowrite32(tsif_device->time_limit,
+		  tsif_device->base + TSIF_TIME_LIMIT_OFF);
+	wmb();
+	iowrite32(ctl | TSIF_STS_CTL_START,
+		  tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+	/* Read back: START must be set if the core accepted the command. */
+	ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+	return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
+}
+
+/* Issue the STOP command to the TSIF core and order the write. */
+static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
+{
+	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+}
+
+/* ===DMA begin=== */
+/**
+ * TSIF DMA theory of operation
+ *
+ * Circular memory buffer \a tsif_mem_buffer allocated;
+ * 4 pointers points to and moved forward on:
+ * - \a ri index of first ready to read packet.
+ * Updated by client's call to tsif_reclaim_packets()
+ * - \a wi points to the next packet to be written by DM.
+ *   Data below is valid and will not be overridden by DMA.
+ * Moved on DM callback
+ * - \a dmwi points to the next packet not scheduled yet for DM
+ * moved when packet scheduled for DM
+ *
+ * In addition, DM xfer keep internal \a wi - copy of \a tsif_device->dmwi
+ * at time immediately after scheduling.
+ *
+ * Initially, 2 packets get scheduled for the DM.
+ *
+ * Upon packet receive, DM writes packet to the pre-programmed
+ * location and invoke its callback.
+ *
+ * The DM callback sets the \a wi pointer to \a xfer->wi;
+ * then it schedules next packet for DM and moves \a dmwi pointer.
+ *
+ * Buffer overflow handling
+ *
+ * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
+ * DMA re-scheduled to the same index.
+ * Callback check and not move \a wi to become equal to \a ri
+ *
+ * On \a read request, data between \a ri and \a wi pointers may be read;
+ * \a ri pointer moved accordingly.
+ *
+ * It is always granted, on modulo sizeof(tsif_mem_buffer), that
+ * \a wi is between [\a ri, \a dmwi]
+ *
+ * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
+ *
+ * Number of scheduled packets for DM: (dmwi-wi)
+ */
+
+/**
+ * tsif_dma_schedule - schedule DMA transfers
+ *
+ * @tsif_device: device
+ *
+ * Executed from process context on init, or from tasklet when
+ * re-scheduling upon DMA completion.
+ * This prevent concurrent execution from several CPU's
+ */
+static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
+{
+	int i, dmwi0, dmwi1, found = 0;
+	/* find free entry */
+	for (i = 0; i < 2; i++) {
+		struct tsif_xfer *xfer = &tsif_device->xfer[i];
+		if (xfer->busy)
+			continue;
+		found++;
+		xfer->busy = 1;
+		/* program the DM box to land at the current dmwi packet */
+		dmwi0 = tsif_device->dmwi;
+		tsif_device->dmov_cmd[i]->box.dst_row_addr =
+			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
+		/* proposed value for dmwi */
+		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
+		/**
+		 * If dmwi going to overlap with ri,
+		 * overflow occurs because data was not read.
+		 * Still get this packet, to not interrupt TSIF
+		 * hardware, but do not advance dmwi.
+		 *
+		 * Upon receive, packet will be dropped.
+		 */
+		if (dmwi1 != tsif_device->ri) {
+			tsif_device->dmwi = dmwi1;
+		} else {
+			dev_info(&tsif_device->pdev->dev,
+				 "Overflow detected\n");
+		}
+		/* snapshot of dmwi the completion callback will commit */
+		xfer->wi = tsif_device->dmwi;
+#ifdef CONFIG_TSIF_DEBUG
+		dev_info(&tsif_device->pdev->dev,
+			 "schedule xfer[%d] -> [%2d]{%2d}\n",
+			 i, dmwi0, xfer->wi);
+#endif
+		/* complete all the writes to box */
+		dma_coherent_pre_ops();
+		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
+	}
+	if (!found)
+		dev_info(&tsif_device->pdev->dev,
+			 "All xfer entries are busy\n");
+}
+
+/**
+ * tsif_dmov_complete_func - DataMover completion callback
+ *
+ * @cmd: original DM command
+ * @result: DM result
+ * @err: optional error buffer
+ *
+ * Executed in IRQ context (Data Mover's IRQ)
+ * DataMover's spinlock @msm_dmov_lock held.
+ */
+static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
+				    unsigned int result,
+				    struct msm_dmov_errdata *err)
+{
+	int i;
+	u32 data_offset;
+	struct tsif_xfer *xfer;
+	struct msm_tsif_device *tsif_device;
+	int reschedule = 0;
+	if (!(result & DMOV_RSLT_VALID)) { /* can I trust to @cmd? */
+		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
+		return;
+	}
+	/* restore original context */
+	xfer = container_of(cmd, struct tsif_xfer, hdr);
+	tsif_device = xfer->tsif_device;
+	i = xfer - tsif_device->xfer;
+	/* where in the circular buffer this xfer actually landed */
+	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
+		      tsif_device->data_buffer_dma;
+
+	/* order reads from the xferred buffer */
+	dma_coherent_post_ops();
+	if (result & DMOV_RSLT_DONE) {
+		int w = data_offset / TSIF_PKT_SIZE;
+		tsif_device->stat_rx++;
+		/*
+		 * software overflow when I was scheduled?
+		 *
+		 * @w is where this xfer was actually written to;
+		 * @xfer->wi is where device's @wi will be set;
+		 *
+		 * if these 2 are equal, we are short in space and
+		 * going to overwrite this xfer - this is "soft drop"
+		 */
+		if (w == xfer->wi)
+			tsif_device->stat_soft_drop++;
+		reschedule = (tsif_device->state == tsif_state_running);
+#ifdef CONFIG_TSIF_DEBUG
+		/* IFI calculation */
+		/*
+		 * update stat_ifi (inter frame interval)
+		 *
+		 * Calculate time difference between last and 1-st
+		 * packets in chunk
+		 *
+		 * To be removed after tuning
+		 */
+		if (TSIF_PKTS_IN_CHUNK > 1) {
+			void *ptr = tsif_device->data_buffer + data_offset;
+			u32 *p0 = ptr;
+			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
+				TSIF_PKT_SIZE;
+			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
+						   tsif_pkt_status(p0));
+			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
+						   tsif_pkt_status(p1));
+			tsif_device->stat_ifi = (tts1 - tts0) /
+				(TSIF_PKTS_IN_CHUNK - 1);
+		}
+#endif
+	} else {
+		/**
+		 *  Error or flush
+		 *
+		 *  To recover - re-open TSIF device.
+		 */
+		/* mark status "not valid" in data buffer */
+		int n;
+		void *ptr = tsif_device->data_buffer + data_offset;
+		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
+			u32 *p = ptr + (n * TSIF_PKT_SIZE);
+			/* last dword is status + TTS */
+			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
+		}
+		if (result & DMOV_RSLT_ERROR) {
+			dev_err(&tsif_device->pdev->dev,
+				"DMA error (0x%08x)\n", result);
+			tsif_device->stat_dmov_err++;
+			/* force device close */
+			if (tsif_device->state == tsif_state_running) {
+				tsif_stop_hw(tsif_device);
+				/*
+				 * Clocks _may_ be stopped right from IRQ
+				 * context. This is far from optimal w.r.t
+				 * latency.
+				 *
+				 * But, this branch taken only in case of
+				 * severe hardware problem (I don't even know
+				 * what should happens for DMOV_RSLT_ERROR);
+				 * thus I prefer code simplicity over
+				 * performance.
+				 */
+				tsif_clock(tsif_device, 0);
+				tsif_device->state = tsif_state_flushing;
+			}
+		}
+		if (result & DMOV_RSLT_FLUSH) {
+			/*
+			 * Flushing normally happens in process of
+			 * @tsif_stop(), when we are waiting for outstanding
+			 * DMA commands to be flushed.
+			 */
+			dev_info(&tsif_device->pdev->dev,
+				 "DMA channel flushed (0x%08x)\n", result);
+			if (tsif_device->state == tsif_state_flushing) {
+				if ((!tsif_device->xfer[0].busy) &&
+				    (!tsif_device->xfer[1].busy)) {
+					tsif_device->state = tsif_state_stopped;
+				}
+			}
+		}
+		if (err)
+			dev_err(&tsif_device->pdev->dev,
+				"Flush data: %08x %08x %08x %08x %08x %08x\n",
+				err->flush[0], err->flush[1], err->flush[2],
+				err->flush[3], err->flush[4], err->flush[5]);
+	}
+	/* commit the write index snapshot and free this xfer slot */
+	tsif_device->wi = xfer->wi;
+	xfer->busy = 0;
+	if (tsif_device->client_notify)
+		tsif_device->client_notify(tsif_device->client_data);
+	/*
+	 * Can't schedule next DMA -
+	 * DataMover driver still hold its semaphore,
+	 * deadlock will occur.
+	 */
+	if (reschedule)
+		tasklet_schedule(&tsif_device->dma_refill);
+}
+
+/**
+ * tsif_dma_refill - tasklet body for tsif_device->dma_refill
+ *
+ * @data: struct msm_tsif_device pointer, cast to unsigned long
+ *
+ * Re-arms DMA transfers, but only while the device is still running;
+ * runs in tasklet (softirq) context, outside the DataMover lock.
+ */
+static void tsif_dma_refill(unsigned long data)
+{
+	struct msm_tsif_device *the_dev = (struct msm_tsif_device *)data;
+
+	if (the_dev->state != tsif_state_running)
+		return;
+	tsif_dma_schedule(the_dev);
+}
+
+/**
+ * tsif_dma_flush - flush DMA channel
+ *
+ * @tsif_device: the TSIF device whose DMA channel is flushed
+ *
+ * Busy waits (poll + sleep) until both outstanding xfers complete.
+ * The completion callback clears @busy and moves the state to
+ * tsif_state_stopped once both slots are idle; we re-issue the flush
+ * each iteration in case a new command was committed meanwhile.
+ * Must be called from sleepable (process) context because of msleep().
+ */
+static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
+		tsif_device->state = tsif_state_flushing;
+		while (tsif_device->xfer[0].busy ||
+		       tsif_device->xfer[1].busy) {
+			msm_dmov_flush(tsif_device->dma);
+			msleep(10);
+		}
+	}
+	tsif_device->state = tsif_state_stopped;
+	/* let the client observe the final "stopped" state */
+	if (tsif_device->client_notify)
+		tsif_device->client_notify(tsif_device->client_data);
+}
+
+/*
+ * tsif_dma_exit - stop DMA activity and release all DMA resources
+ *
+ * Order matters: kill the refill tasklet and flush outstanding DMA
+ * commands before freeing the command/data buffers they reference.
+ * Safe to call on a partially-initialized device (NULL checks below),
+ * which is why tsif_dma_init() uses it as its error path.
+ */
+static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
+{
+	int i;
+	tsif_device->state = tsif_state_flushing;
+	tasklet_kill(&tsif_device->dma_refill);
+	tsif_dma_flush(tsif_device);
+	for (i = 0; i < 2; i++) {
+		if (tsif_device->dmov_cmd[i]) {
+			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
+					  tsif_device->dmov_cmd[i],
+					  tsif_device->dmov_cmd_dma[i]);
+			tsif_device->dmov_cmd[i] = NULL;
+		}
+	}
+	if (tsif_device->data_buffer) {
+		/* detach the debugfs blob from the buffer before freeing */
+		tsif_device->blob_wrapper_databuf.data = NULL;
+		tsif_device->blob_wrapper_databuf.size = 0;
+		dma_free_coherent(NULL, TSIF_BUF_SIZE,
+				  tsif_device->data_buffer,
+				  tsif_device->data_buffer_dma);
+		tsif_device->data_buffer = NULL;
+	}
+}
+
+/*
+ * tsif_dma_init - allocate DMA buffers and pre-build DataMover commands
+ *
+ * Allocates one coherent data buffer plus two "box" command descriptors
+ * (double-buffering), wires each descriptor to its tsif_xfer context and
+ * the shared completion callback, and resets the ring indices.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure (all partial
+ * allocations are released via tsif_dma_exit()).
+ */
+static int tsif_dma_init(struct msm_tsif_device *tsif_device)
+{
+	int i;
+	/* TODO: allocate all DMA memory in one buffer */
+	/* Note: don't pass device,
+	   it require coherent_dma_mask id device definition */
+	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
+				&tsif_device->data_buffer_dma, GFP_KERNEL);
+	if (!tsif_device->data_buffer)
+		goto err;
+	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
+		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
+	/* expose the data buffer through debugfs as a binary blob */
+	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
+	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
+	tsif_device->ri = 0;
+	tsif_device->wi = 0;
+	tsif_device->dmwi = 0;
+	for (i = 0; i < 2; i++) {
+		dmov_box *box;
+		struct msm_dmov_cmd *hdr;
+		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
+			sizeof(struct tsif_dmov_cmd),
+			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
+		if (!tsif_device->dmov_cmd[i])
+			goto err;
+		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
+			 i, tsif_device->dmov_cmd[i],
+			 tsif_device->dmov_cmd_dma[i]);
+		/* dst in 16 LSB, src in 16 MSB */
+		box = &(tsif_device->dmov_cmd[i]->box);
+		/* box-mode read: TSIF data port -> chunk of packet rows */
+		box->cmd = CMD_MODE_BOX | CMD_LC |
+			   CMD_SRC_CRCI(tsif_device->crci);
+		box->src_row_addr =
+			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
+		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
+		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
+		/* source does not advance (FIFO); dest steps one packet */
+		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;
+
+		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
+			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
+				      offsetof(struct tsif_dmov_cmd, box));
+		tsif_device->xfer[i].tsif_device = tsif_device;
+		hdr = &tsif_device->xfer[i].hdr;
+		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
+			      offsetof(struct tsif_dmov_cmd, box_ptr));
+		hdr->complete_func = tsif_dmov_complete_func;
+	}
+	/* start from a clean DMA channel */
+	msm_dmov_flush(tsif_device->dma);
+	return 0;
+err:
+	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
+	tsif_dma_exit(tsif_device);
+	return -ENOMEM;
+}
+
+/* ===DMA end=== */
+
+/* ===IRQ begin=== */
+
+/*
+ * tsif_irq - TSIF interrupt handler
+ *
+ * Reads the status/control register, updates per-cause statistics and
+ * acknowledges the interrupt by writing the same value back (the status
+ * bits appear to be write-to-clear; wmb() orders the ack before return).
+ * Returns IRQ_NONE when none of the known cause bits are set, since the
+ * line is requested with IRQF_SHARED.
+ */
+static irqreturn_t tsif_irq(int irq, void *dev_id)
+{
+	struct msm_tsif_device *tsif_device = dev_id;
+	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+			 TSIF_STS_CTL_OVERFLOW |
+			 TSIF_STS_CTL_LOST_SYNC |
+			 TSIF_STS_CTL_TIMEOUT))) {
+		dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
+		return IRQ_NONE;
+	}
+	if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
+		tsif_device->stat_rx++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
+		tsif_device->stat_overflow++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
+		tsif_device->stat_lost_sync++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
+		tsif_device->stat_timeout++;
+	}
+	/* ack: write status back; wmb() ensures the write is posted */
+	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+	return IRQ_HANDLED;
+}
+
+/* ===IRQ end=== */
+
+/* ===Device attributes begin=== */
+
+/*
+ * show_stats - sysfs "stats" attribute read handler
+ *
+ * Dumps device configuration, state, counters and a few clock/debug
+ * registers as human-readable text. NOTE: the format string and the
+ * argument list below are positionally paired - keep them in sync
+ * when adding fields.
+ */
+static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	char *state_string;
+	switch (tsif_device->state) {
+	case tsif_state_stopped:
+		state_string = "stopped";
+		break;
+	case tsif_state_running:
+		state_string = "running";
+		break;
+	case tsif_state_flushing:
+		state_string = "flushing";
+		break;
+	default:
+		state_string = "???";
+	}
+	return snprintf(buf, PAGE_SIZE,
+			"Device %s\n"
+			"Mode = %d\n"
+			"Time limit = %d\n"
+			"State %s\n"
+			"Client = %p\n"
+			"Pkt/Buf = %d\n"
+			"Pkt/chunk = %d\n"
+			"--statistics--\n"
+			"Rx chunks = %d\n"
+			"Overflow = %d\n"
+			"Lost sync = %d\n"
+			"Timeout = %d\n"
+			"DMA error = %d\n"
+			"Soft drop = %d\n"
+			"IFI = %d\n"
+			"(0x%08x - 0x%08x) / %d\n"
+			"--debug--\n"
+			"GLBL_CLK_ENA = 0x%08x\n"
+			"ROW_RESET = 0x%08x\n"
+			"CLK_HALT_STATEB = 0x%08x\n"
+			"TV_NS_REG = 0x%08x\n"
+			"TSIF_NS_REG = 0x%08x\n",
+			dev_name(dev),
+			tsif_device->mode,
+			tsif_device->time_limit,
+			state_string,
+			tsif_device->client_data,
+			TSIF_PKTS_IN_BUF,
+			TSIF_PKTS_IN_CHUNK,
+			tsif_device->stat_rx,
+			tsif_device->stat_overflow,
+			tsif_device->stat_lost_sync,
+			tsif_device->stat_timeout,
+			tsif_device->stat_dmov_err,
+			tsif_device->stat_soft_drop,
+			tsif_device->stat_ifi,
+			tsif_device->stat1,
+			tsif_device->stat0,
+			TSIF_PKTS_IN_CHUNK - 1,
+			ioread32(GLBL_CLK_ENA),
+			ioread32(ROW_RESET),
+			ioread32(CLK_HALT_STATEB),
+			ioread32(TV_NS_REG),
+			ioread32(TSIF_NS_REG)
+			);
+}
+/**
+ * set_stats - sysfs "stats" write handler; any write resets statistics
+ *
+ * @dev:   device owning the attribute
+ * @attr:  the attribute (unused)
+ * @buf:   user data (contents ignored)
+ * @count: number of bytes written; returned unchanged as success
+ */
+static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif = dev_get_drvdata(dev);
+
+	tsif->stat_rx = 0;
+	tsif->stat_overflow = 0;
+	tsif->stat_lost_sync = 0;
+	tsif->stat_timeout = 0;
+	tsif->stat_dmov_err = 0;
+	tsif->stat_soft_drop = 0;
+	tsif->stat_ifi = 0;
+	return count;
+}
+static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
+
+/* show_mode - sysfs read handler: report current TSIF mode as decimal */
+static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct msm_tsif_device *tsif = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", tsif->mode);
+}
+
+/* set_mode - sysfs write handler: parse an integer and apply it via
+ * tsif_set_mode(); returns @count on success, negative errno otherwise. */
+static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif = dev_get_drvdata(dev);
+	int mode;
+	int ret;
+
+	if (sscanf(buf, "%d", &mode) != 1) {
+		dev_err(&tsif->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	ret = tsif_set_mode(tsif, mode);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
+
+/* show_time_limit - sysfs read handler: report TSIF time limit */
+static ssize_t show_time_limit(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct msm_tsif_device *tsif = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", tsif->time_limit);
+}
+
+/* set_time_limit - sysfs write handler: parse an integer and apply it
+ * via tsif_set_time_limit(); returns @count on success, errno on failure. */
+static ssize_t set_time_limit(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif = dev_get_drvdata(dev);
+	int limit;
+	int ret;
+
+	if (sscanf(buf, "%d", &limit) != 1) {
+		dev_err(&tsif->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	ret = tsif_set_time_limit(tsif, limit);
+	return ret ? ret : count;
+}
+static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
+		   show_time_limit, set_time_limit);
+
+/*
+ * show_buf_config - sysfs read handler: report buffer geometry as
+ * "<pkts_per_chunk> * <chunks_per_buf>".
+ *
+ * Fix: both fields are u32, so print with %u instead of %d to avoid a
+ * signed/unsigned format mismatch (-Wformat).
+ */
+static ssize_t show_buf_config(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%u * %u\n",
+			tsif_device->pkts_per_chunk,
+			tsif_device->chunks_per_buf);
+}
+
+/*
+ * set_buf_config - sysfs write handler: parse "<pkts> * <chunks>" and
+ * apply via tsif_set_buf_config(); returns @count on success.
+ *
+ * Fix: @p and @c are u32, so scan with %u instead of %d - the %d
+ * conversion expects int* and is undefined for unsigned targets.
+ */
+static ssize_t set_buf_config(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	u32 p, c;
+	int rc;
+	if (2 != sscanf(buf, "%u * %u", &p, &c)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_buf_config(tsif_device, p, c);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
+		   show_buf_config, set_buf_config);
+
+/* All sysfs attributes of the TSIF device, registered as one group
+ * in msm_tsif_probe() and removed in msm_tsif_remove(). */
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_time_limit.attr,
+	&dev_attr_buf_config.attr,
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+/* ===debugfs begin=== */
+
+/* debugfs_iomem_x32_set - write a 32-bit value to the mapped register
+ * behind this debugfs file; wmb() makes sure the write is posted. */
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	void __iomem *reg = data;
+
+	iowrite32(val, reg);
+	wmb();
+	return 0;
+}
+
+/* debugfs_iomem_x32_get - read back the mapped 32-bit register */
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	void __iomem *reg = data;
+
+	*val = ioread32(reg);
+	return 0;
+}
+
+/* debugfs file ops exposing one iomem register as hex ("0x%08llx") */
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+/*
+ * debugfs_create_iomem_x32 - create a debugfs file for one register.
+ * @value points at the ioremapped register, not ordinary memory.
+ * NOTE(review): only used within this file - should probably be
+ * declared static to avoid polluting the global kernel namespace.
+ */
+struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
+					struct dentry *parent, u32 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
+}
+
+/*
+ * action_open - bring the TSIF device up: DMA, IRQ, clocks, hardware.
+ *
+ * Sequence is significant: DMA must be scheduled before the TSIF block
+ * is started, otherwise the Data Mover reports "bus error".
+ * Returns 0 on success; -EAGAIN if not currently stopped; other
+ * negative errno on DMA/HW/runtime-PM failure.
+ *
+ * NOTE(review): if pm_runtime_get() fails we return without stopping
+ * the already-started hardware/DMA/clock - looks like a leak on that
+ * error path; confirm and add teardown if so.
+ */
+static int action_open(struct msm_tsif_device *tsif_device)
+{
+	int rc = -EINVAL;
+	int result;
+
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
+	if (tsif_device->state != tsif_state_stopped)
+		return -EAGAIN;
+	rc = tsif_dma_init(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
+		return rc;
+	}
+	tsif_device->state = tsif_state_running;
+	/*
+	 * DMA should be scheduled prior to TSIF hardware initialization,
+	 * otherwise "bus error" will be reported by Data Mover
+	 */
+	enable_irq(tsif_device->irq);
+	tsif_clock(tsif_device, 1);
+	tsif_dma_schedule(tsif_device);
+	/*
+	 * init the device if required
+	 */
+	if (pdata->init)
+		pdata->init(pdata);
+	rc = tsif_start_hw(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
+		tsif_dma_exit(tsif_device);
+		tsif_clock(tsif_device, 0);
+		return rc;
+	}
+
+	result = pm_runtime_get(&tsif_device->pdev->dev);
+	if (result < 0) {
+		dev_err(&tsif_device->pdev->dev,
+			"Runtime PM: Unable to wake up the device, rc = %d\n",
+			result);
+		return result;
+	}
+
+	/* keep the system awake while the stream is active */
+	wake_lock(&tsif_device->wake_lock);
+	return rc;
+}
+
+/*
+ * action_close - counterpart of action_open(): stop HW, flush DMA,
+ * gate clocks, disable IRQ, drop runtime-PM reference and wake lock.
+ *
+ * Order matters: DMA must be flushed/stopped before the TSIF hardware,
+ * otherwise the Data Mover reports "bus error". Always returns 0.
+ */
+static int action_close(struct msm_tsif_device *tsif_device)
+{
+	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
+		 (int)tsif_device->state);
+	/*
+	 * DMA should be flushed/stopped prior to TSIF hardware stop,
+	 * otherwise "bus error" will be reported by Data Mover
+	 */
+	tsif_stop_hw(tsif_device);
+	tsif_dma_exit(tsif_device);
+	tsif_clock(tsif_device, 0);
+	disable_irq(tsif_device->irq);
+
+	pm_runtime_put(&tsif_device->pdev->dev);
+	wake_unlock(&tsif_device->wake_lock);
+	return 0;
+}
+
+
+/* Dispatch table for the debugfs "action" file: the written string is
+ * matched by name and the corresponding handler invoked. */
+static struct {
+	int (*func)(struct msm_tsif_device *);
+	const char *name;
+} actions[] = {
+	{ action_open,  "open"},
+	{ action_close, "close"},
+};
+
+/*
+ * tsif_debugfs_action_write - debugfs "action" file write handler.
+ *
+ * Copies up to sizeof(s)-1 bytes from userspace, NUL-terminates, and
+ * matches the input against the actions[] table by prefix (so e.g.
+ * "open\n" still matches "open"). Returns @count on success, the
+ * handler's errno on failure, or -EINVAL when nothing matches.
+ */
+static ssize_t tsif_debugfs_action_write(struct file *filp,
+					 const char __user *userbuf,
+					 size_t count, loff_t *f_pos)
+{
+	int i;
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	char s[40];
+	int len = min(sizeof(s) - 1, count);
+	if (copy_from_user(s, userbuf, len))
+		return -EFAULT;
+	s[len] = '\0';
+	dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
+	for (i = 0; i < ARRAY_SIZE(actions); i++) {
+		if (!strncmp(s, actions[i].name,
+			     min(count, strlen(actions[i].name)))) {
+			int rc = actions[i].func(tsif_device);
+			if (!rc)
+				rc = count;
+			return rc;
+		}
+	}
+	return -EINVAL;
+}
+
+/* Shared debugfs open: stash the msm_tsif_device (i_private) for the
+ * read/write handlers. */
+static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+/* debugfs "action" file: write-only control ("open"/"close") */
+static const struct file_operations fops_debugfs_action = {
+	.open  = tsif_debugfs_generic_open,
+	.write = tsif_debugfs_action_write,
+};
+
+/*
+ * tsif_debugfs_dma_read - debugfs "dma" file: one-line snapshot of the
+ * ring indices (ri/wi/dmwi) and each xfer slot's destination/target.
+ *
+ * Fix: the scratch buffer was file-scope static, so two concurrent
+ * readers could interleave their snprintf() output; ~200 bytes fit
+ * comfortably on the stack, and the pointless "static char *buf = bufa"
+ * indirection is dropped.
+ */
+static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
+				     size_t count, loff_t *f_pos)
+{
+	char buf[200];
+	int sz = sizeof(buf);
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	int len = 0;
+	if (tsif_device) {
+		int i;
+		len += snprintf(buf + len, sz - len,
+				"ri %3d | wi %3d | dmwi %3d |",
+				tsif_device->ri, tsif_device->wi,
+				tsif_device->dmwi);
+		for (i = 0; i < 2; i++) {
+			struct tsif_xfer *xfer = &tsif_device->xfer[i];
+			if (xfer->busy) {
+				u32 dst =
+				    tsif_device->dmov_cmd[i]->box.dst_row_addr;
+				u32 base = tsif_device->data_buffer_dma;
+				/* packet index this xfer writes to */
+				int w = (dst - base) / TSIF_PKT_SIZE;
+				len += snprintf(buf + len, sz - len,
+						" [%3d]{%3d}",
+						w, xfer->wi);
+			} else {
+				len += snprintf(buf + len, sz - len,
+						" ---idle---");
+			}
+		}
+		len += snprintf(buf + len, sz - len, "\n");
+	} else {
+		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
+	}
+	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
+}
+
+/* debugfs "dma" file: read-only DMA state snapshot */
+static const struct file_operations fops_debugfs_dma = {
+	.open = tsif_debugfs_generic_open,
+	.read = tsif_debugfs_dma_read,
+};
+
+/*
+ * tsif_debugfs_gpios_read - debugfs "gpios" file: one "label: value"
+ * line per configured TSIF GPIO.
+ *
+ * Fix: the scratch buffer was file-scope static, so two concurrent
+ * readers could interleave their output; ~300 bytes fit on the stack,
+ * and the redundant "static char *buf = bufa" alias is dropped.
+ */
+static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
+				       size_t count, loff_t *f_pos)
+{
+	char buf[300];
+	int sz = sizeof(buf);
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	int len = 0;
+	if (tsif_device) {
+		struct msm_tsif_platform_data *pdata =
+			tsif_device->pdev->dev.platform_data;
+		int i;
+		for (i = 0; i < pdata->num_gpios; i++) {
+			if (pdata->gpios[i].gpio_cfg) {
+				/* normalize to 0/1 */
+				int x = !!gpio_get_value(GPIO_PIN(
+					pdata->gpios[i].gpio_cfg));
+				len += snprintf(buf + len, sz - len,
+						"%15s: %d\n",
+						pdata->gpios[i].label, x);
+			}
+		}
+	} else {
+		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
+	}
+	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
+}
+
+/* debugfs "gpios" file: read-only GPIO level dump */
+static const struct file_operations fops_debugfs_gpios = {
+	.open = tsif_debugfs_generic_open,
+	.read = tsif_debugfs_gpios_read,
+};
+
+
+/*
+ * tsif_debugfs_init - populate the per-device debugfs directory:
+ * one x32 file per TSIF register, plus "gpios", "action", "dma" and
+ * the raw data buffer blob. Failures are tolerated silently -
+ * debugfs is best-effort diagnostics only.
+ */
+static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
+{
+	tsif_device->dent_tsif = debugfs_create_dir(
+	      dev_name(&tsif_device->pdev->dev), NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+			   debugfs_create_iomem_x32(
+				debugfs_tsif_regs[i].name,
+				debugfs_tsif_regs[i].mode,
+				tsif_device->dent_tsif,
+				base + debugfs_tsif_regs[i].offset);
+		}
+		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
+		tsif_device->debugfs_action = debugfs_create_file("action",
+		    S_IWUSR,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
+		tsif_device->debugfs_dma = debugfs_create_file("dma",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
+		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
+	}
+}
+
+/*
+ * tsif_debugfs_exit - tear down the per-device debugfs tree.
+ *
+ * debugfs_remove_recursive() drops every file created by
+ * tsif_debugfs_init() in one call; the cached dentry pointers are then
+ * cleared so nothing is left dangling. No-op if init never succeeded.
+ */
+static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
+{
+	int reg;
+
+	if (!tsif_device->dent_tsif)
+		return;
+	debugfs_remove_recursive(tsif_device->dent_tsif);
+	tsif_device->dent_tsif = NULL;
+	for (reg = 0; reg < ARRAY_SIZE(debugfs_tsif_regs); reg++)
+		tsif_device->debugfs_tsif_regs[reg] = NULL;
+	tsif_device->debugfs_gpio = NULL;
+	tsif_device->debugfs_action = NULL;
+	tsif_device->debugfs_dma = NULL;
+	tsif_device->debugfs_databuf = NULL;
+}
+/* ===debugfs end=== */
+
+/* ===module begin=== */
+/* All probed TSIF devices, linked through msm_tsif_device.devlist */
+static LIST_HEAD(tsif_devices);
+
+/* tsif_find_by_id - look up a probed device by its platform-device id;
+ * returns NULL when no device with that id has been registered. */
+static struct msm_tsif_device *tsif_find_by_id(int id)
+{
+	struct msm_tsif_device *dev;
+
+	list_for_each_entry(dev, &tsif_devices, devlist)
+		if (dev->pdev->id == id)
+			return dev;
+	return NULL;
+}
+
+/*
+ * msm_tsif_probe - platform driver probe.
+ *
+ * Validates platform data and device id, allocates and cross-links the
+ * device structure, acquires clocks, maps registers, claims DMA/CRCI
+ * resources and GPIOs, enables runtime PM, sets up debugfs, requests
+ * the (shared) IRQ in disabled state, publishes sysfs attributes and
+ * registers the device on the global list.
+ *
+ * Error handling unwinds in reverse order via the label chain below.
+ * NOTE(review): if platform_get_irq() returned 0, rc would be 0 and the
+ * error branch skipped without an IRQ being requested - presumably IRQ 0
+ * never occurs on this platform; confirm.
+ */
+static int __devinit msm_tsif_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
+	struct msm_tsif_device *tsif_device;
+	struct resource *res;
+	/* check device validity */
+	/* must have platform data */
+	if (!plat) {
+		dev_err(&pdev->dev, "Platform data not available\n");
+		rc = -EINVAL;
+		goto out;
+	}
+/*TODO macro for max. id*/
+	if ((pdev->id < 0) || (pdev->id > 0)) {
+		dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
+		rc = -EINVAL;
+		goto out;
+	}
+	/* OK, we will use this device */
+	tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
+	if (!tsif_device) {
+		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* cross links */
+	tsif_device->pdev = pdev;
+	platform_set_drvdata(pdev, tsif_device);
+	tsif_device->mode = 1;
+	tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
+	tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
+	tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
+		     (unsigned long)tsif_device);
+	if (tsif_get_clocks(tsif_device))
+		goto err_clocks;
+/* map I/O memory */
+	tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!tsif_device->memres) {
+		dev_err(&pdev->dev, "Missing MEM resource\n");
+		rc = -ENXIO;
+		goto err_rgn;
+	}
+	/* DMA resource encodes channel in .start and CRCI in .end */
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Missing DMA resource\n");
+		rc = -ENXIO;
+		goto err_rgn;
+	}
+	tsif_device->dma = res->start;
+	tsif_device->crci = res->end;
+	tsif_device->base = ioremap(tsif_device->memres->start,
+				    resource_size(tsif_device->memres));
+	if (!tsif_device->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		goto err_ioremap;
+	}
+	dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
+		 tsif_device->memres->start, tsif_device->base);
+	rc = tsif_start_gpios(tsif_device);
+	if (rc)
+		goto err_gpio;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	tsif_debugfs_init(tsif_device);
+	rc = platform_get_irq(pdev, 0);
+	if (rc > 0) {
+		tsif_device->irq = rc;
+		/* request disabled; action_open() enables it */
+		rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
+				 dev_name(&pdev->dev), tsif_device);
+		disable_irq(tsif_device->irq);
+	}
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
+			tsif_device->irq, rc);
+		goto err_irq;
+	}
+	rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+	wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
+		       dev_name(&pdev->dev));
+	dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
+		 tsif_device->irq, tsif_device->memres->start,
+		 tsif_device->dma, tsif_device->crci);
+	list_add(&tsif_device->devlist, &tsif_devices);
+	return 0;
+/* error path - unwinds init steps in reverse; first line is dead code
+ * kept only to document what a post-attrs failure would need to undo */
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+err_attrs:
+	free_irq(tsif_device->irq, tsif_device);
+err_irq:
+	tsif_debugfs_exit(tsif_device);
+	tsif_stop_gpios(tsif_device);
+err_gpio:
+	iounmap(tsif_device->base);
+err_ioremap:
+err_rgn:
+	tsif_put_clocks(tsif_device);
+err_clocks:
+	kfree(tsif_device);
+out:
+	return rc;
+}
+
+/*
+ * msm_tsif_remove - platform driver remove: undo msm_tsif_probe()
+ * in reverse order (list, wake lock, sysfs, IRQ, debugfs, DMA, GPIOs,
+ * iomap, clocks, runtime PM, memory).
+ */
+static int __devexit msm_tsif_remove(struct platform_device *pdev)
+{
+	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
+	dev_info(&pdev->dev, "Unload\n");
+	list_del(&tsif_device->devlist);
+	wake_lock_destroy(&tsif_device->wake_lock);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+	free_irq(tsif_device->irq, tsif_device);
+	tsif_debugfs_exit(tsif_device);
+	tsif_dma_exit(tsif_device);
+	tsif_stop_gpios(tsif_device);
+	iounmap(tsif_device->base);
+	tsif_put_clocks(tsif_device);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(tsif_device);
+	return 0;
+}
+
+/* Runtime-PM suspend hook: nothing to do, just trace the transition. */
+static int tsif_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+/* Runtime-PM resume hook: nothing to do, just trace the transition. */
+static int tsif_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+/* Runtime-PM callbacks wired into the platform driver below */
+static const struct dev_pm_ops tsif_dev_pm_ops = {
+	.runtime_suspend = tsif_runtime_suspend,
+	.runtime_resume  = tsif_runtime_resume,
+};
+
+
+/*
+ * Platform driver registration.
+ *
+ * Fix: msm_tsif_remove() is annotated __devexit, so its pointer must be
+ * wrapped with __devexit_p(), not __exit_p(). With __exit_p(), .remove
+ * is NULL in built-in builds even when CONFIG_HOTPLUG keeps the
+ * function around, making the device impossible to unbind.
+ */
+static struct platform_driver msm_tsif_driver = {
+	.probe          = msm_tsif_probe,
+	.remove         = __devexit_p(msm_tsif_remove),
+	.driver         = {
+		.name   = "msm_tsif",
+		.pm     = &tsif_dev_pm_ops,
+	},
+};
+
+/* Module entry: register the platform driver, logging on failure. */
+static int __init mod_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&msm_tsif_driver);
+	if (ret)
+		pr_err("TSIF: platform_driver_register failed: %d\n", ret);
+	return ret;
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit mod_exit(void)
+{
+	platform_driver_unregister(&msm_tsif_driver);
+}
+/* ===module end=== */
+
+/* public API */
+
+/*
+ * tsif_attach - bind a client to TSIF device @id.
+ *
+ * @id:     platform-device id to attach to
+ * @notify: callback invoked on DMA completion / state change
+ * @data:   opaque client context passed to @notify
+ *
+ * Returns a cookie for the other tsif_* API calls, or an ERR_PTR:
+ * -ENODEV when no such device exists, -EBUSY when already claimed.
+ *
+ * Fix: tsif_find_by_id() returns NULL for an unknown id; the original
+ * dereferenced it unconditionally, oopsing on a bad @id.
+ */
+void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
+{
+	struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
+	if (!tsif_device)
+		return ERR_PTR(-ENODEV);
+	if (tsif_device->client_notify || tsif_device->client_data)
+		return ERR_PTR(-EBUSY);
+	tsif_device->client_notify = notify;
+	tsif_device->client_data = data;
+	/* prevent from unloading */
+	get_device(&tsif_device->pdev->dev);
+	return tsif_device;
+}
+EXPORT_SYMBOL(tsif_attach);
+
+/* tsif_detach - release the client binding created by tsif_attach()
+ * and drop the device reference that pinned the module. */
+void tsif_detach(void *cookie)
+{
+	struct msm_tsif_device *tsif = cookie;
+
+	tsif->client_notify = NULL;
+	tsif->client_data = NULL;
+	put_device(&tsif->pdev->dev);
+}
+EXPORT_SYMBOL(tsif_detach);
+
+/* tsif_get_info - report the DMA data buffer address and its capacity
+ * in packets; either out-parameter may be NULL to skip it. */
+void tsif_get_info(void *cookie, void **pdata, int *psize)
+{
+	struct msm_tsif_device *tsif = cookie;
+
+	if (pdata != NULL)
+		*pdata = tsif->data_buffer;
+	if (psize != NULL)
+		*psize = TSIF_PKTS_IN_BUF;
+}
+EXPORT_SYMBOL(tsif_get_info);
+
+/*
+ * tsif_set_mode - select the TSIF operating mode (1, 2 or 3).
+ *
+ * Only allowed while the device is stopped. Returns 0 on success,
+ * -EBUSY when the device is active, -EINVAL for an unknown mode.
+ */
+int tsif_set_mode(void *cookie, int mode)
+{
+	struct msm_tsif_device *tsif = cookie;
+
+	if (tsif->state != tsif_state_stopped) {
+		dev_err(&tsif->pdev->dev,
+			"Can't change mode while device is active\n");
+		return -EBUSY;
+	}
+	if (mode < 1 || mode > 3) {
+		dev_err(&tsif->pdev->dev, "Invalid mode: %d\n", mode);
+		return -EINVAL;
+	}
+	tsif->mode = mode;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_mode);
+
+/*
+ * tsif_set_time_limit - set the 24-bit TSIF time limit.
+ *
+ * Only allowed while the device is stopped. Returns 0 on success,
+ * -EBUSY when active, -EINVAL if @value does not fit in 24 bits.
+ */
+int tsif_set_time_limit(void *cookie, u32 value)
+{
+	struct msm_tsif_device *tsif = cookie;
+
+	if (tsif->state != tsif_state_stopped) {
+		dev_err(&tsif->pdev->dev,
+			"Can't change time limit while device is active\n");
+		return -EBUSY;
+	}
+	if (value & ~0xFFFFFFu) {
+		dev_err(&tsif->pdev->dev,
+			"Invalid time limit (should be 24 bit): %#x\n", value);
+		return -EINVAL;
+	}
+	tsif->time_limit = value;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_time_limit);
+
+/*
+ * tsif_set_buf_config - set DMA buffer geometry (packets per chunk,
+ * chunks per buffer). Only allowed before the data buffer exists.
+ *
+ * Returns 0 on success, -EBUSY if the buffer is already allocated,
+ * -EINVAL for an invalid geometry.
+ *
+ * Fixes: the original guard "p * c > 10240" used a u32 multiply that
+ * can wrap (e.g. 65536 * 65536 == 0) and so accept absurd sizes; it
+ * also accepted 0 for either value, which would yield an unusable
+ * zero-sized chunk/buffer. Both are rejected here; all previously
+ * valid configurations are still accepted.
+ */
+int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->data_buffer) {
+		dev_err(&tsif_device->pdev->dev,
+			"Data buffer already allocated: %p\n",
+			tsif_device->data_buffer);
+		return -EBUSY;
+	}
+	/* check for crazy user: division avoids u32 multiply overflow */
+	if (!pkts_in_chunk || !chunks_in_buf ||
+	    pkts_in_chunk > 10240 / chunks_in_buf) {
+		dev_err(&tsif_device->pdev->dev,
+			"Buffer requested is too large: %d * %d\n",
+			pkts_in_chunk,
+			chunks_in_buf);
+		return -EINVAL;
+	}
+	/* parameters are OK, execute */
+	tsif_device->pkts_per_chunk = pkts_in_chunk;
+	tsif_device->chunks_per_buf = chunks_in_buf;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_buf_config);
+
+/* tsif_get_state - snapshot the read index, write index and device
+ * state; any out-parameter may be NULL to skip it. */
+void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
+{
+	struct msm_tsif_device *tsif = cookie;
+
+	if (ri != NULL)
+		*ri = tsif->ri;
+	if (wi != NULL)
+		*wi = tsif->wi;
+	if (state != NULL)
+		*state = tsif->state;
+}
+EXPORT_SYMBOL(tsif_get_state);
+
+/* tsif_start - public wrapper: start streaming on the attached device */
+int tsif_start(void *cookie)
+{
+	return action_open((struct msm_tsif_device *)cookie);
+}
+EXPORT_SYMBOL(tsif_start);
+
+/* tsif_stop - public wrapper: stop streaming on the attached device */
+void tsif_stop(void *cookie)
+{
+	action_close((struct msm_tsif_device *)cookie);
+}
+EXPORT_SYMBOL(tsif_stop);
+
+/* tsif_reclaim_packets - client reports how far it has consumed the
+ * ring buffer; @read_index becomes the new read index. */
+void tsif_reclaim_packets(void *cookie, int read_index)
+{
+	struct msm_tsif_device *tsif = cookie;
+
+	tsif->ri = read_index;
+}
+EXPORT_SYMBOL(tsif_reclaim_packets);
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
+		   " Driver for the MSM chipset");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/tsif_chrdev.c b/drivers/misc/tsif_chrdev.c
new file mode 100644
index 0000000..4068ac3
--- /dev/null
+++ b/drivers/misc/tsif_chrdev.c
@@ -0,0 +1,225 @@
+/**
+ * TSIF driver client
+ *
+ * Character device that, being read
+ * returns stream of TSIF packets.
+ *
+ * Copyright (c) 2009-2010, Code Aurora Forum. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h> /* Needed by all modules */
+#include <linux/kernel.h> /* Needed for KERN_INFO */
+#include <linux/cdev.h>
+#include <linux/err.h> /* IS_ERR etc. */
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h> /* TASK_INTERRUPTIBLE */
+
+#include <linux/uaccess.h> /* copy_to_user */
+
+#include <linux/tsif_api.h>
+
+/* Per-device state of the TSIF character-device client */
+struct tsif_chrdev {
+	struct cdev cdev;
+	struct device *dev;
+	wait_queue_head_t wq_read;	/* readers sleep here for data */
+	void *cookie;			/* handle from tsif_attach() */
+	/* mirror for tsif data */
+	void *data_buffer;		/* TSIF DMA ring buffer */
+	unsigned buf_size_packets;	/**< buffer size in packets */
+	unsigned ri, wi;		/* cached read/write indices */
+	enum tsif_state state;		/* cached driver state */
+	unsigned rptr;			/* read position in bytes */
+};
+
+/*
+ * tsif_open - char device open: start streaming on the bound TSIF
+ * device and fetch its buffer geometry.
+ *
+ * Fix: file_operations .open returns int, not ssize_t; the mismatched
+ * prototype created an incompatible function pointer in tsif_fops.
+ *
+ * Returns -ENODEV if no TSIF device was attached at init time, any
+ * error from tsif_start(), or the result of nonseekable_open().
+ */
+static int tsif_open(struct inode *inode, struct file *file)
+{
+	int rc;
+	struct tsif_chrdev *the_dev =
+	       container_of(inode->i_cdev, struct tsif_chrdev, cdev);
+	if (!the_dev->cookie)  /* not bound yet */
+		return -ENODEV;
+	file->private_data = the_dev;
+	rc = tsif_start(the_dev->cookie);
+	if (rc)
+		return rc;
+	tsif_get_info(the_dev->cookie, &the_dev->data_buffer,
+		      &the_dev->buf_size_packets);
+	the_dev->rptr = 0;
+	return nonseekable_open(inode, file);
+}
+
+/*
+ * tsif_release - char device close: stop the TSIF stream.
+ *
+ * Fix: file_operations .release returns int, not ssize_t; the
+ * mismatched prototype created an incompatible pointer in tsif_fops.
+ */
+static int tsif_release(struct inode *inode, struct file *filp)
+{
+	struct tsif_chrdev *the_dev = filp->private_data;
+	tsif_stop(the_dev->cookie);
+	return 0;
+}
+
+/*
+ * tsif_read - char device read: hand out TS packets from the ring.
+ *
+ * Refreshes the cached ri/wi/state from the TSIF driver, blocks (unless
+ * O_NONBLOCK) until data arrives or the stream stops, then copies one
+ * contiguous span - up to the write index or the end of the ring,
+ * whichever comes first - and reports the consumed packets back via
+ * tsif_reclaim_packets(). Returns bytes copied, 0 at EOF (stream
+ * stopped with no data), or a negative errno.
+ */
+static ssize_t tsif_read(struct file *filp, char __user *buf, size_t count,
+			 loff_t *f_pos)
+{
+	int avail = 0;
+	int wi;
+	struct tsif_chrdev *the_dev = filp->private_data;
+	tsif_get_state(the_dev->cookie, &the_dev->ri, &the_dev->wi,
+		       &the_dev->state);
+	/* consistency check: byte pointer must agree with packet index */
+	if (the_dev->ri != (the_dev->rptr / TSIF_PKT_SIZE)) {
+		dev_err(the_dev->dev,
+			"%s: inconsistent read pointers: ri %d rptr %d\n",
+			__func__, the_dev->ri, the_dev->rptr);
+		the_dev->rptr = the_dev->ri * TSIF_PKT_SIZE;
+	}
+	/* ri == wi if no data */
+	if (the_dev->ri == the_dev->wi) {
+		/* shall I block waiting for data? */
+		if (filp->f_flags & O_NONBLOCK) {
+			if (the_dev->state == tsif_state_running) {
+				return -EAGAIN;
+			} else {
+				/* not running -> EOF */
+				return 0;
+			}
+		}
+		/* tsif_notify() refreshes indices and wakes this queue */
+		if (wait_event_interruptible(the_dev->wq_read,
+		      (the_dev->ri != the_dev->wi) ||
+		      (the_dev->state != tsif_state_running))) {
+			/* got signal -> tell FS to handle it */
+			return -ERESTARTSYS;
+		}
+		if (the_dev->ri == the_dev->wi) {
+			/* still no data -> EOF */
+			return 0;
+		}
+	}
+	/* contiguous chunk last up to wi or end of buffer */
+	wi = (the_dev->wi > the_dev->ri) ?
+		the_dev->wi : the_dev->buf_size_packets;
+	avail = min(wi * TSIF_PKT_SIZE - the_dev->rptr, count);
+	if (copy_to_user(buf, the_dev->data_buffer + the_dev->rptr, avail))
+		return -EFAULT;
+	/* advance and wrap the byte pointer, then derive the packet index */
+	the_dev->rptr = (the_dev->rptr + avail) %
+		(TSIF_PKT_SIZE * the_dev->buf_size_packets);
+	the_dev->ri = the_dev->rptr / TSIF_PKT_SIZE;
+	*f_pos += avail;
+	tsif_reclaim_packets(the_dev->cookie, the_dev->ri);
+	return avail;
+}
+
+/* tsif_notify - TSIF driver callback (may run in IRQ/tasklet context):
+ * refresh the cached indices/state and wake any sleeping reader. */
+static void tsif_notify(void *data)
+{
+	struct tsif_chrdev *dev = data;
+
+	tsif_get_state(dev->cookie, &dev->ri, &dev->wi, &dev->state);
+	wake_up_interruptible(&dev->wq_read);
+}
+
+/* Character device entry points */
+static const struct file_operations tsif_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tsif_read,
+	.open    = tsif_open,
+	.release = tsif_release,
+};
+
+/* Device class and char-device number bookkeeping for /dev/tsifN */
+static struct class *tsif_class;
+static dev_t tsif_dev;  /**< 1-st dev_t from allocated range */
+static dev_t tsif_dev0; /**< next not yet assigned dev_t */
+
+/*
+ * tsif_init_one - set up one char device instance: register the cdev,
+ * create its /dev node and attach to TSIF device @index.
+ *
+ * Fix: the return value of cdev_add() was ignored; on failure the code
+ * went on to create a device node for an unregistered cdev. It is now
+ * checked and unwound like the other steps.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int tsif_init_one(struct tsif_chrdev *the_dev, int index)
+{
+	int rc;
+	pr_info("%s[%d]\n", __func__, index);
+	cdev_init(&the_dev->cdev, &tsif_fops);
+	the_dev->cdev.owner = THIS_MODULE;
+	init_waitqueue_head(&the_dev->wq_read);
+	rc = cdev_add(&the_dev->cdev, tsif_dev0++, 1);
+	if (rc) {
+		pr_err("cdev_add failed: %d\n", rc);
+		goto err_add;
+	}
+	the_dev->dev = device_create(tsif_class, NULL, the_dev->cdev.dev,
+				     the_dev, "tsif%d", index);
+	if (IS_ERR(the_dev->dev)) {
+		rc = PTR_ERR(the_dev->dev);
+		pr_err("device_create failed: %d\n", rc);
+		goto err_create;
+	}
+	the_dev->cookie = tsif_attach(index, tsif_notify, the_dev);
+	if (IS_ERR(the_dev->cookie)) {
+		rc = PTR_ERR(the_dev->cookie);
+		pr_err("tsif_attach failed: %d\n", rc);
+		goto err_attach;
+	}
+	/* now data buffer is not allocated yet */
+	tsif_get_info(the_dev->cookie, &the_dev->data_buffer, NULL);
+	dev_info(the_dev->dev,
+		 "Device %d.%d attached to TSIF, buffer size %d\n",
+		 MAJOR(the_dev->cdev.dev), MINOR(the_dev->cdev.dev),
+		 the_dev->buf_size_packets);
+	return 0;
+err_attach:
+	device_destroy(tsif_class, the_dev->cdev.dev);
+err_create:
+	cdev_del(&the_dev->cdev);
+err_add:
+	return rc;
+}
+
+/* tsif_exit_one - undo tsif_init_one(): detach from the TSIF driver,
+ * remove the /dev node and unregister the cdev. */
+static void tsif_exit_one(struct tsif_chrdev *the_dev)
+{
+	dev_info(the_dev->dev, "%s\n", __func__);
+	tsif_detach(the_dev->cookie);
+	device_destroy(tsif_class, the_dev->cdev.dev);
+	cdev_del(&the_dev->cdev);
+}
+
+#define TSIF_NUM_DEVS 1 /**< support this many devices */
+
+/* NOTE(review): only used within this file - should be static */
+struct tsif_chrdev the_devices[TSIF_NUM_DEVS];
+
+/*
+ * Module entry: reserve a char-device region, create the "tsif" class
+ * and bring up instance 0. Unwinds each step on failure.
+ */
+static int __init mod_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&tsif_dev, 0, TSIF_NUM_DEVS, "tsif");
+	if (ret) {
+		pr_err("alloc_chrdev_region failed: %d\n", ret);
+		goto fail_region;
+	}
+	tsif_dev0 = tsif_dev;
+	tsif_class = class_create(THIS_MODULE, "tsif");
+	if (IS_ERR(tsif_class)) {
+		ret = PTR_ERR(tsif_class);
+		pr_err("Error creating tsif class: %d\n", ret);
+		goto fail_class;
+	}
+	ret = tsif_init_one(&the_devices[0], 0);
+	if (ret)
+		goto fail_dev0;
+	return 0;
+fail_dev0:
+	class_destroy(tsif_class);
+fail_class:
+	unregister_chrdev_region(tsif_dev, TSIF_NUM_DEVS);
+fail_region:
+	return ret;
+}
+
+/* Module exit: tear down instance 0, then the class and dev region. */
+static void __exit mod_exit(void)
+{
+	tsif_exit_one(&the_devices[0]);
+	class_destroy(tsif_class);
+	unregister_chrdev_region(tsif_dev, TSIF_NUM_DEVS);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSIF character device interface");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/tzcom.c b/drivers/misc/tzcom.c
new file mode 100644
index 0000000..e947dee
--- /dev/null
+++ b/drivers/misc/tzcom.c
@@ -0,0 +1,910 @@
+/* Qualcomm TrustZone communicator driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define KMSG_COMPONENT "TZCOM"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/android_pmem.h>
+#include <linux/io.h>
+#include <mach/scm.h>
+#include <mach/peripheral-loader.h>
+#include <linux/tzcom.h>
+#include "tzcomi.h"
+
+#define TZCOM_DEV "tzcom"
+
+#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */
+
+#undef PDEBUG
+#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
+ __func__, current->pid, current->comm, ## args)
+
+#undef PERR
+#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
+ __func__, current->pid, current->comm, ## args)
+
+
+static struct class *driver_class;
+static dev_t tzcom_device_no;
+static struct cdev tzcom_cdev;
+
+static u8 *sb_in_virt;
+static s32 sb_in_phys;
+static size_t sb_in_length = 20 * SZ_1K;
+static u8 *sb_out_virt;
+static s32 sb_out_phys;
+static size_t sb_out_length = 20 * SZ_1K;
+
+static void *pil;
+
+static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
+static DEFINE_MUTEX(sb_in_lock);
+static DEFINE_MUTEX(sb_out_lock);
+static DEFINE_MUTEX(send_cmd_lock);
+
+/* One callback command received from TZ, queued on
+ * tzcom_data_t.callback_list_head. The callback payload is kmalloc'ed
+ * together with this node (see tzcom_send_cmd(): the allocation is
+ * sizeof(*new_entry) + sb_out_cb_data_len and the data is copied at
+ * (u8 *)&callback + sb_out_cb_data_off). */
+struct tzcom_callback_list {
+ struct list_head list;
+ struct tzcom_callback callback;
+};
+
+/* One userspace service registered via TZCOM_IOCTL_REGISTER_SERVICE_REQ.
+ * The service sleeps on next_cmd_wq in tzcom_read_next_cmd() until
+ * next_cmd_flag is set by tzcom_send_cmd(). */
+struct tzcom_registered_svc_list {
+ struct list_head list;
+ struct tzcom_register_svc_op_req svc;
+ wait_queue_head_t next_cmd_wq;
+ int next_cmd_flag;
+};
+
+/* Per-open-file state, allocated in tzcom_open() and stored in
+ * file->private_data. */
+struct tzcom_data_t {
+ struct list_head callback_list_head; /* pending TZ callbacks */
+ struct mutex callback_list_lock;
+ struct list_head registered_svc_list_head; /* registered services */
+ spinlock_t registered_svc_list_lock;
+ wait_queue_head_t cont_cmd_wq; /* send_cmd waits here for cont_cmd */
+ int cont_cmd_flag;
+ /* instance id of the service that handled the last callback; only
+ * that instance may issue TZCOM_IOCTL_CONTINUE_CMD_REQ */
+ u32 handled_cmd_svc_instance_id;
+};
+
+/*
+ * Thin wrapper around scm_call() that targets the TZ scheduler service
+ * with the fixed TZSCHEDULER_CMD_ID command id.
+ */
+static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
+ void *resp_buf, size_t resp_len)
+{
+ return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
+ cmd_buf, cmd_len, resp_buf, resp_len);
+}
+
+/*
+ * Translate a kernel virtual address to a physical address, using the
+ * known phys bases of the two pmem shared buffers when the address
+ * falls inside sb_in/sb_out (those are ioremap'ed, so virt_to_phys()
+ * would not work on them).
+ *
+ * NOTE(review): for any other address this falls through to
+ * virt_to_phys(), which is only valid for lowmem linear-map addresses —
+ * presumably callers never pass anything else; verify.
+ */
+static s32 tzcom_virt_to_phys(u8 *virt)
+{
+ if (virt >= sb_in_virt &&
+ virt < (sb_in_virt + sb_in_length)) {
+ return sb_in_phys + (virt - sb_in_virt);
+ } else if (virt >= sb_out_virt &&
+ virt < (sb_out_virt + sb_out_length)) {
+ return sb_out_phys + (virt - sb_out_virt);
+ } else {
+ return virt_to_phys(virt);
+ }
+}
+
+/*
+ * Inverse of tzcom_virt_to_phys(): map a physical address back to the
+ * kernel virtual mapping of sb_in/sb_out when it lies inside either
+ * shared buffer, otherwise fall back to phys_to_virt().
+ *
+ * NOTE(review): the phys_to_virt() fallback is only meaningful for
+ * linear-mapped RAM — same caveat as tzcom_virt_to_phys().
+ */
+static u8 *tzcom_phys_to_virt(s32 phys)
+{
+ if (phys >= sb_in_phys &&
+ phys < (sb_in_phys + sb_in_length)) {
+ return sb_in_virt + (phys - sb_in_phys);
+ } else if (phys >= sb_out_phys &&
+ phys < (sb_out_phys + sb_out_length)) {
+ return sb_out_virt + (phys - sb_out_phys);
+ } else {
+ return phys_to_virt(phys);
+ }
+}
+
+/*
+ * Check that @svc does not clash with any already-registered service:
+ * neither the same svc_id, nor a cmd id range overlapping an existing
+ * service's [cmd_id_low, cmd_id_high].
+ *
+ * Returns 1 when unique, 0 on any clash. Walks the list under
+ * registered_svc_list_lock.
+ *
+ * NOTE(review): the two range tests only catch @svc's endpoints falling
+ * inside an existing range; a new range that fully encloses an existing
+ * one would presumably slip through — confirm whether that is intended.
+ */
+static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
+ struct tzcom_register_svc_op_req svc)
+{
+ struct tzcom_registered_svc_list *ptr;
+ int unique = 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+ list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
+ if (ptr->svc.svc_id == svc.svc_id) {
+ PERR("Service id: %u is already registered",
+ ptr->svc.svc_id);
+ unique = 0;
+ break;
+ } else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
+ svc.cmd_id_low <= ptr->svc.cmd_id_high) {
+ PERR("Cmd id low falls in the range of another"
+ "registered service");
+ unique = 0;
+ break;
+ } else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
+ svc.cmd_id_high <= ptr->svc.cmd_id_high) {
+ PERR("Cmd id high falls in the range of another"
+ "registered service");
+ unique = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+ return unique;
+}
+
+/*
+ * TZCOM_IOCTL_REGISTER_SERVICE_REQ handler: validate the requested
+ * service against existing registrations, assign it a fresh instance
+ * id, copy the updated request back to userspace, and append the new
+ * entry to the per-file registered service list.
+ *
+ * NOTE(review): copy_from_user/copy_to_user return the number of bytes
+ * NOT copied, and that positive count is returned directly as the
+ * ioctl result — callers expect a negative errno (-EFAULT) instead.
+ *
+ * NOTE(review): the uniqueness check and the list_add_tail() are under
+ * separate lock acquisitions, so two concurrent registrations could
+ * both pass __tzcom_is_svc_unique() — looks like a TOCTOU race;
+ * confirm.
+ *
+ * NOTE(review): copy_to_user() reports success to userspace before the
+ * kmalloc below can still fail, leaving the caller believing the
+ * service is registered when it is not.
+ */
+static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
+{
+ int ret;
+ unsigned long flags;
+ struct tzcom_register_svc_op_req rcvd_svc;
+ struct tzcom_registered_svc_list *new_entry;
+
+ ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
+
+ if (ret) {
+ PDEBUG("copy_from_user failed");
+ return ret;
+ }
+
+ PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
+ rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
+ rcvd_svc.cmd_id_high);
+ if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
+ PDEBUG("Provided service is not unique");
+ return -EINVAL;
+ }
+
+ /* instance ids are global across all open files */
+ rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);
+
+ ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
+ if (ret) {
+ PDEBUG("copy_to_user failed");
+ return ret;
+ }
+
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (!new_entry) {
+ pr_err("%s: kmalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
+ new_entry->next_cmd_flag = 0;
+ init_waitqueue_head(&new_entry->next_cmd_wq);
+
+ spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+ list_add_tail(&new_entry->list, &data->registered_svc_list_head);
+ spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+
+ return ret;
+}
+
+/*
+ * TZCOM_IOCTL_UNREGISTER_SERVICE_REQ handler: find the entry matching
+ * both svc_id and instance_id, wake any reader sleeping on its wait
+ * queue, unlink and free it. Returns -EINVAL when no match exists.
+ *
+ * NOTE(review): the entry is kfree'd immediately after wake_up_all();
+ * a woken tzcom_read_next_cmd() sleeper still holds a pointer to this
+ * entry (and its waitqueue), so this looks like a potential
+ * use-after-free — verify against the wakeup path.
+ *
+ * NOTE(review): as elsewhere, a positive copy_from_user() remainder is
+ * returned instead of -EFAULT.
+ */
+static int tzcom_unregister_service(struct tzcom_data_t *data,
+ void __user *argp)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct tzcom_unregister_svc_op_req req;
+ struct tzcom_registered_svc_list *ptr;
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ PDEBUG("copy_from_user failed");
+ return ret;
+ }
+
+ spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+ list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
+ if (req.svc_id == ptr->svc.svc_id &&
+ req.instance_id == ptr->svc.instance_id) {
+ wake_up_all(&ptr->next_cmd_wq);
+ list_del(&ptr->list);
+ kfree(ptr);
+ spin_unlock_irqrestore(&data->registered_svc_list_lock,
+ flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+ return -EINVAL;
+}
+
+/**
+ * +---------+ +-----+ +-----------------+
+ * | TZCOM | | SCM | | TZCOM_SCHEDULER |
+ * +----+----+ +--+--+ +--------+--------+
+ * | | |
+ * | scm_call | |
+ * |------------------------------------->| |
+ * | cmd_buf = struct tzcom_command { | |
+ * | cmd_type, |------------------>|
+ * +------+------------- sb_in_cmd_addr, | |
+ * | | sb_in_cmd_len | |
+ * | | } | |
+ * | | resp_buf = struct tzcom_response { | |
+ * | cmd_status, | |
+ * | +---------- sb_in_rsp_addr, | |
+ * | | sb_in_rsp_len |<------------------|
+ * | | }
+ * | | struct tzcom_callback {---------+
+ * | | uint32_t cmd_id; |
+ * | | uint32_t sb_out_cb_data_len;|
+ * | +---------------+ uint32_t sb_out_cb_data_off;|
+ * | | } |
+ * | _________________________|_______________________________ |
+ * | +-----------------------+| +----------------------+ |
+ * +--->+ copy from req.cmd_buf |+>| copy to req.resp_buf | |
+ * +-----------------------+ +----------------------+ |
+ * _________________________________________________________ |
+ * INPUT SHARED BUFFER |
+ * +------------------------------------------------------------------------+
+ * | _________________________________________________________
+ * | +---------------------------------------------+
+ * +->| cmd_id | data_len | data_off | data... |
+ * +---------------------------------------------+
+ * |<------------>|copy to next_cmd.req_buf
+ * _________________________________________________________
+ * OUTPUT SHARED BUFFER
+ */
+/*
+ * TZCOM_IOCTL_SEND_CMD_REQ handler (serialized by send_cmd_lock in the
+ * ioctl dispatcher). Copies the user command into the SB-in shared
+ * buffer, issues the scm call, and loops while TZ reports
+ * TZ_SCHED_STATUS_INCOMPLETE: each iteration harvests one callback
+ * from SB-out onto the callback list, wakes all registered services,
+ * and sleeps on cont_cmd_wq until tzcom_cont_cmd() signals completion
+ * of the callback. Finally copies the response back to userspace.
+ *
+ * NOTE(review): req.cmd_buf and req.resp_buf are userspace pointers,
+ * but they are dereferenced with plain memcpy() below — these should
+ * be copy_from_user()/copy_to_user(); as written this is both unsafe
+ * and an oops source on unmapped user addresses.
+ *
+ * NOTE(review): both tzcom_scm_call() return values are ignored, so a
+ * failed scm call is indistinguishable from success here.
+ *
+ * NOTE(review): on the -ERESTARTSYS path the callback entry already
+ * added to callback_list_head is left queued (freed only at release) —
+ * confirm that is intended.
+ */
+static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+ int ret = 0;
+ unsigned long flags;
+ u32 reqd_len_sb_in = 0;
+ u32 reqd_len_sb_out = 0;
+ struct tzcom_send_cmd_op_req req;
+ struct tzcom_command cmd;
+ struct tzcom_response resp;
+ struct tzcom_callback *next_callback;
+ void *cb_data = NULL;
+ struct tzcom_callback_list *new_entry;
+ struct tzcom_callback *cb;
+ size_t new_entry_len = 0;
+ struct tzcom_registered_svc_list *ptr_svc;
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ PDEBUG("copy_from_user failed");
+ return ret;
+ }
+
+ if (req.cmd_buf == NULL || req.resp_buf == NULL) {
+ PDEBUG("cmd buffer or response buffer is null");
+ return -EINVAL;
+ }
+
+ if (req.cmd_len <= 0 || req.resp_len <= 0) {
+ PDEBUG("cmd buffer length or "
+ "response buffer length not valid");
+ return -EINVAL;
+ }
+ PDEBUG("received cmd_req.req: 0x%p",
+ req.cmd_buf);
+ PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
+ req.resp_len,
+ req.resp_buf);
+
+ /* cmd and response share SB-in: cmd at offset 0, response follows */
+ reqd_len_sb_in = req.cmd_len + req.resp_len;
+ if (reqd_len_sb_in > sb_in_length) {
+ PDEBUG("Not enough memory to fit cmd_buf and "
+ "resp_buf. Required: %u, Available: %u",
+ reqd_len_sb_in, sb_in_length);
+ return -ENOMEM;
+ }
+
+ /* Copy req.cmd_buf to SB in and set req.resp_buf to SB in + cmd_len */
+ mutex_lock(&sb_in_lock);
+ PDEBUG("Before memcpy on sb_in");
+ /* NOTE(review): req.cmd_buf is __user — should be copy_from_user */
+ memcpy(sb_in_virt, req.cmd_buf, req.cmd_len);
+ PDEBUG("After memcpy on sb_in");
+
+ /* cmd_type will always be a new here */
+ cmd.cmd_type = TZ_SCHED_CMD_NEW;
+ cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
+ cmd.sb_in_cmd_len = req.cmd_len;
+
+ resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+ resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
+ req.cmd_len);
+ resp.sb_in_rsp_len = req.resp_len;
+
+ PDEBUG("before call tzcom_scm_call, cmd_id = : %u", req.cmd_id);
+ PDEBUG("before call tzcom_scm_call, sizeof(cmd) = : %u", sizeof(cmd));
+
+ /* NOTE(review): return value ignored */
+ tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp, sizeof(resp));
+ mutex_unlock(&sb_in_lock);
+
+ while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
+ /*
+ * If cmd is incomplete, get the callback cmd out from SB out
+ * and put it on the list
+ */
+ PDEBUG("cmd_status is incomplete.");
+ next_callback = (struct tzcom_callback *)sb_out_virt;
+
+ mutex_lock(&sb_out_lock);
+ reqd_len_sb_out = sizeof(*next_callback)
+ + next_callback->sb_out_cb_data_len;
+ if (reqd_len_sb_out > sb_out_length) {
+ PDEBUG("Not enough memory to"
+ " fit tzcom_callback buffer."
+ " Required: %u, Available: %u",
+ reqd_len_sb_out, sb_out_length);
+ mutex_unlock(&sb_out_lock);
+ return -ENOMEM;
+ }
+
+ /* Assumption is cb_data_off is sizeof(tzcom_callback) */
+ new_entry_len = sizeof(*new_entry)
+ + next_callback->sb_out_cb_data_len;
+ new_entry = kmalloc(new_entry_len, GFP_KERNEL);
+ if (!new_entry) {
+ PERR("kmalloc failed");
+ mutex_unlock(&sb_out_lock);
+ return -ENOMEM;
+ }
+
+ /* snapshot header and payload out of SB-out so TZ can reuse
+ * the shared buffer for the next callback */
+ cb = &new_entry->callback;
+ cb->cmd_id = next_callback->cmd_id;
+ cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
+ cb->sb_out_cb_data_off = next_callback->sb_out_cb_data_off;
+
+ cb_data = (u8 *)next_callback
+ + next_callback->sb_out_cb_data_off;
+ memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
+ next_callback->sb_out_cb_data_len);
+ mutex_unlock(&sb_out_lock);
+
+ mutex_lock(&data->callback_list_lock);
+ list_add_tail(&new_entry->list, &data->callback_list_head);
+ mutex_unlock(&data->callback_list_lock);
+
+ /*
+ * We don't know which service can handle the command. so we
+ * wake up all blocking services and let them figure out if
+ * they can handle the given command.
+ */
+ spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+ list_for_each_entry(ptr_svc,
+ &data->registered_svc_list_head, list) {
+ ptr_svc->next_cmd_flag = 1;
+ wake_up_interruptible(&ptr_svc->next_cmd_wq);
+ }
+ spin_unlock_irqrestore(&data->registered_svc_list_lock,
+ flags);
+
+ PDEBUG("waking up next_cmd_wq and "
+ "waiting for cont_cmd_wq");
+ if (wait_event_interruptible(data->cont_cmd_wq,
+ data->cont_cmd_flag != 0)) {
+ PDEBUG("Interrupted: exiting send_cmd loop");
+ return -ERESTARTSYS;
+ }
+ data->cont_cmd_flag = 0;
+ /* resume the suspended TZ command; scheduler restores its
+ * stack for TZ_SCHED_CMD_PENDING */
+ cmd.cmd_type = TZ_SCHED_CMD_PENDING;
+ mutex_lock(&sb_in_lock);
+ tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
+ sizeof(resp));
+ mutex_unlock(&sb_in_lock);
+ }
+
+ /* recompute the virtual response location; resp.sb_in_rsp_addr was
+ * a physical address during the scm call */
+ mutex_lock(&sb_in_lock);
+ resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
+ resp.sb_in_rsp_len = req.resp_len;
+ mutex_unlock(&sb_in_lock);
+
+ /* Cmd is done now. Copy the response from SB in to user */
+ if (req.resp_len >= resp.sb_in_rsp_len) {
+ PDEBUG("Before memcpy resp_buf");
+ mutex_lock(&sb_in_lock);
+ /* NOTE(review): req.resp_buf is __user — should be
+ * copy_to_user */
+ memcpy(req.resp_buf, resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
+ mutex_unlock(&sb_in_lock);
+ } else {
+ PDEBUG("Provided response buffer is smaller"
+ " than required. Required: %u,"
+ " Provided: %u",
+ resp.sb_in_rsp_len, req.resp_len);
+ ret = -ENOMEM;
+ }
+
+ PDEBUG("sending cmd_req.rsp "
+ "size: %u, ptr: 0x%p", req.resp_len,
+ req.resp_buf);
+ ret = copy_to_user(argp, &req, sizeof(req));
+ if (ret) {
+ PDEBUG("copy_to_user failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Look up a registered service by instance id, under
+ * registered_svc_list_lock.
+ *
+ * NOTE(review): if no entry matches, list_for_each_entry() terminates
+ * with @entry computed from the list head itself — an invalid pointer,
+ * not NULL — and it is returned as-is. The caller
+ * (tzcom_read_next_cmd) dereferences it without a check. This should
+ * return NULL on no-match and the caller should handle it.
+ */
+static struct tzcom_registered_svc_list *__tzcom_find_svc(
+ struct tzcom_data_t *data,
+ uint32_t instance_id)
+{
+ struct tzcom_registered_svc_list *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+ list_for_each_entry(entry,
+ &data->registered_svc_list_head, list) {
+ if (entry->svc.instance_id == instance_id)
+ break;
+ }
+ spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+ return entry;
+}
+
+/*
+ * Scan the pending-callback list for a callback whose cmd_id falls
+ * inside @ptr_svc's registered range and, if the caller's buffer is
+ * large enough, copy the callback payload to userspace and dequeue the
+ * entry.
+ *
+ * Returns 0 on successful hand-off, -ENOMEM when the user buffer is
+ * too small, -EAGAIN when no matching callback is queued (caller
+ * re-sleeps), or the positive copy_to_user() remainder on fault
+ * (NOTE(review): should be -EFAULT; on that path the entry also stays
+ * queued, which may be intentional so it can be retried — confirm).
+ */
+static int __tzcom_copy_cmd(struct tzcom_data_t *data,
+ struct tzcom_next_cmd_op_req *req,
+ struct tzcom_registered_svc_list *ptr_svc)
+{
+ int found = 0;
+ int ret = -EAGAIN;
+ struct tzcom_callback_list *entry;
+ struct tzcom_callback *cb;
+
+ PDEBUG("In here");
+ mutex_lock(&data->callback_list_lock);
+ PDEBUG("Before looping through cmd and svc lists.");
+ list_for_each_entry(entry, &data->callback_list_head, list) {
+ cb = &entry->callback;
+ if (req->svc_id == ptr_svc->svc.svc_id &&
+ req->instance_id == ptr_svc->svc.instance_id &&
+ cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
+ cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
+ PDEBUG("Found matching entry");
+ found = 1;
+ if (cb->sb_out_cb_data_len <= req->req_len) {
+ PDEBUG("copying cmd buffer %p to req "
+ "buffer %p, length: %u",
+ (u8 *)cb + cb->sb_out_cb_data_off,
+ req->req_buf, cb->sb_out_cb_data_len);
+ req->cmd_id = cb->cmd_id;
+ ret = copy_to_user(req->req_buf,
+ (u8 *)cb + cb->sb_out_cb_data_off,
+ cb->sb_out_cb_data_len);
+ if (ret) {
+ PDEBUG("copy_to_user failed");
+ break;
+ }
+ /* hand-off complete: drop the queued callback */
+ list_del(&entry->list);
+ kfree(entry);
+ ret = 0;
+ } else {
+ PDEBUG("callback data buffer is "
+ "larger than provided buffer."
+ "Required: %u, Provided: %u",
+ cb->sb_out_cb_data_len,
+ req->req_len);
+ ret = -ENOMEM;
+ }
+ break;
+ }
+ }
+ PDEBUG("After looping through cmd and svc lists.");
+ mutex_unlock(&data->callback_list_lock);
+ return ret;
+}
+
+/*
+ * TZCOM_IOCTL_READ_NEXT_CMD_REQ handler: block until a callback
+ * matching this service instance is available, copy it to the user
+ * buffer and record this instance as the callback handler (so only it
+ * may issue CONTINUE_CMD).
+ *
+ * NOTE(review): this_svc from __tzcom_find_svc() is used without a
+ * no-match check — with an unregistered (but in-range) instance_id
+ * this dereferences an invalid pointer. The instance_id <=
+ * svc_instance_ctr test does not guarantee the id is registered on
+ * THIS file.
+ *
+ * NOTE(review): positive copy_from_user/copy_to_user remainders are
+ * returned instead of -EFAULT, as elsewhere in this file.
+ */
+static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+ int ret = 0;
+ struct tzcom_next_cmd_op_req req;
+ struct tzcom_registered_svc_list *this_svc;
+
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ PDEBUG("copy_from_user failed");
+ return ret;
+ }
+
+ if (req.instance_id > atomic_read(&svc_instance_ctr)) {
+ PDEBUG("Invalid instance_id for the request");
+ return -EINVAL;
+ }
+
+ if (!req.req_buf || req.req_len == 0) {
+ PDEBUG("Invalid request buffer or buffer length");
+ return -EINVAL;
+ }
+
+ PDEBUG("Before next_cmd loop");
+ this_svc = __tzcom_find_svc(data, req.instance_id);
+
+ while (1) {
+ PDEBUG("Before wait_event next_cmd.");
+ if (wait_event_interruptible(this_svc->next_cmd_wq,
+ this_svc->next_cmd_flag != 0)) {
+ PDEBUG("Interrupted: exiting wait_next_cmd loop");
+ /* woken up for different reason */
+ return -ERESTARTSYS;
+ }
+ PDEBUG("After wait_event next_cmd.");
+ this_svc->next_cmd_flag = 0;
+
+ /* -EAGAIN from the copy helper means "not ours": loop and
+ * sleep again until a matching callback arrives */
+ ret = __tzcom_copy_cmd(data, &req, this_svc);
+ if (ret == 0) {
+ PDEBUG("Successfully found svc for cmd");
+ data->handled_cmd_svc_instance_id = req.instance_id;
+ break;
+ } else if (ret == -ENOMEM) {
+ PDEBUG("Not enough memory");
+ return ret;
+ }
+ }
+ ret = copy_to_user(argp, &req, sizeof(req));
+ if (ret) {
+ PDEBUG("copy_to_user failed");
+ return ret;
+ }
+ PDEBUG("copy_to_user is done.");
+ return ret;
+}
+
+/*
+ * TZCOM_IOCTL_CONTINUE_CMD_REQ handler: the service instance that
+ * consumed the last callback writes its response into SB-out and wakes
+ * tzcom_send_cmd() so it can resume the suspended TZ command.
+ *
+ * NOTE(review): req.resp_buf is a userspace pointer but is read with a
+ * plain memcpy() — must be copy_from_user(). req.resp_len is also not
+ * validated against sb_out_length, so a large value overruns the
+ * shared buffer.
+ */
+static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+ int ret = 0;
+ struct tzcom_cont_cmd_op_req req;
+ ret = copy_from_user(&req, argp, sizeof(req));
+ if (ret) {
+ PDEBUG("copy_from_user failed");
+ return ret;
+ }
+
+ /*
+ * Only the svc instance that handled the cmd (in read_next_cmd method)
+ * can call continue cmd
+ */
+ if (data->handled_cmd_svc_instance_id != req.instance_id) {
+ PDEBUG("Only the service instance that handled the last "
+ "callback can continue cmd. "
+ "Expected: %u, Received: %u",
+ data->handled_cmd_svc_instance_id,
+ req.instance_id);
+ return -EINVAL;
+ }
+
+ if (req.resp_buf) {
+ mutex_lock(&sb_out_lock);
+ memcpy(sb_out_virt, req.resp_buf, req.resp_len);
+ mutex_unlock(&sb_out_lock);
+ }
+
+ data->cont_cmd_flag = 1;
+ wake_up_interruptible(&data->cont_cmd_wq);
+ return ret;
+}
+
+/*
+ * Unlocked ioctl dispatcher: routes the five TZCOM ioctls to their
+ * handlers. Only SEND_CMD is serialized (send_cmd_lock) because it
+ * owns the SB-in command/response protocol for the duration of a TZ
+ * command; the other handlers take their own finer-grained locks.
+ */
+static long tzcom_ioctl(struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+ struct tzcom_data_t *tzcom_data = file->private_data;
+ void __user *argp = (void __user *) arg;
+ PDEBUG("enter tzcom_ioctl()");
+ switch (cmd) {
+ case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
+ PDEBUG("ioctl register_service_req()");
+ ret = tzcom_register_service(tzcom_data, argp);
+ if (ret)
+ PDEBUG("failed tzcom_register_service: %d", ret);
+ break;
+ }
+ case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
+ PDEBUG("ioctl unregister_service_req()");
+ ret = tzcom_unregister_service(tzcom_data, argp);
+ if (ret)
+ PDEBUG("failed tzcom_unregister_service: %d", ret);
+ break;
+ }
+ case TZCOM_IOCTL_SEND_CMD_REQ: {
+ PDEBUG("ioctl send_cmd_req()");
+ /* Only one client allowed here at a time */
+ mutex_lock(&send_cmd_lock);
+ ret = tzcom_send_cmd(tzcom_data, argp);
+ mutex_unlock(&send_cmd_lock);
+ if (ret)
+ PDEBUG("failed tzcom_send_cmd: %d", ret);
+ break;
+ }
+ case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
+ PDEBUG("ioctl read_next_cmd_req()");
+ ret = tzcom_read_next_cmd(tzcom_data, argp);
+ if (ret)
+ PDEBUG("failed tzcom_read_next: %d", ret);
+ break;
+ }
+ case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
+ PDEBUG("ioctl continue_cmd_req()");
+ ret = tzcom_cont_cmd(tzcom_data, argp);
+ if (ret)
+ PDEBUG("failed tzcom_cont_cmd: %d", ret);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * open(): lazily load the "playrdy" PIL image on first open, tell TZ
+ * where the SB-out shared buffer lives (INIT_SB_OUT command via
+ * SB-in), then allocate and initialize the per-file tzcom_data_t.
+ *
+ * NOTE(review): the pil == NULL check-then-load is not serialized, so
+ * two concurrent first opens can race on pil_get()/pil assignment.
+ *
+ * NOTE(review): the tzcom_scm_call() return value is ignored; on scm
+ * failure resp holds whatever was pre-filled, and the error is only
+ * caught indirectly via resp.sb_in_rsp_addr / sb_out_init_rsp.ret.
+ */
+static int tzcom_open(struct inode *inode, struct file *file)
+{
+ long pil_error;
+ struct tz_pr_init_sb_req_s sb_out_init_req;
+ struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
+ void *rsp_addr_virt;
+ struct tzcom_command cmd;
+ struct tzcom_response resp;
+ struct tzcom_data_t *tzcom_data;
+
+ PDEBUG("In here");
+ if (pil == NULL) {
+ pil = pil_get("playrdy");
+ if (IS_ERR(pil)) {
+ PERR("Playready PIL image load failed");
+ pil_error = PTR_ERR(pil);
+ pil = NULL;
+ return pil_error;
+ }
+ PDEBUG("playrdy image loaded successfully");
+ }
+
+ /* hand TZ the physical address/length of SB-out */
+ sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
+ sb_out_init_req.sb_len = sb_out_length;
+ sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
+ PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
+ "sb_ptr (phys): 0x%x }",
+ sb_out_init_req.pr_cmd,
+ sb_out_init_req.sb_len,
+ sb_out_init_req.sb_ptr);
+
+ mutex_lock(&sb_in_lock);
+ PDEBUG("Before memcpy on sb_in");
+ memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
+ PDEBUG("After memcpy on sb_in");
+
+ /* It will always be a new cmd from this method */
+ cmd.cmd_type = TZ_SCHED_CMD_NEW;
+ cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
+ cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
+ PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
+ "sb_in_cmd_len: %u }",
+ cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);
+
+ /* response area follows the command inside SB-in; addresses are
+ * physical since TZ consumes them */
+ resp.cmd_status = 0;
+ resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr + cmd.sb_in_cmd_len;
+ resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
+ PDEBUG("tzcom_response before scm { cmd_status: %u, "
+ "sb_in_rsp_addr: %p, sb_in_rsp_len: %u }",
+ resp.cmd_status, resp.sb_in_rsp_addr,
+ resp.sb_in_rsp_len);
+
+ PDEBUG("Before scm_call for sb_init");
+ tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
+ PDEBUG("After scm_call for sb_init");
+ PDEBUG("tzcom_response after scm { cmd_status: %u, "
+ "sb_in_rsp_addr: %p, sb_in_rsp_len: %u }",
+ resp.cmd_status, resp.sb_in_rsp_addr,
+ resp.sb_in_rsp_len);
+
+ if (resp.sb_in_rsp_addr) {
+ /* TZ returned a physical address; translate back to our
+ * mapping before reading the response */
+ rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
+ resp.sb_in_rsp_addr);
+ PDEBUG("Received response phys: %p, virt: %p",
+ resp.sb_in_rsp_addr,
+ rsp_addr_virt);
+ memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
+ } else {
+ PERR("Error with SB initialization");
+ mutex_unlock(&sb_in_lock);
+ return -EPERM;
+ }
+ mutex_unlock(&sb_in_lock);
+
+ PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
+ sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);
+
+ if (sb_out_init_rsp.ret) {
+ PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
+ return -EPERM;
+ }
+
+ tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
+ if (!tzcom_data) {
+ PERR("kmalloc failed");
+ return -ENOMEM;
+ }
+ file->private_data = tzcom_data;
+
+ INIT_LIST_HEAD(&tzcom_data->callback_list_head);
+ mutex_init(&tzcom_data->callback_list_lock);
+
+ INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
+ spin_lock_init(&tzcom_data->registered_svc_list_lock);
+
+ init_waitqueue_head(&tzcom_data->cont_cmd_wq);
+ tzcom_data->cont_cmd_flag = 0;
+ tzcom_data->handled_cmd_svc_instance_id = 0;
+ return 0;
+}
+
+/*
+ * release(): wake any sleepers on this file's wait queues, then free
+ * all queued callbacks, all registered service entries, and the
+ * per-file state. Lists are walked with the _safe variant because
+ * entries are freed during iteration.
+ *
+ * NOTE(review): the lists are torn down without taking
+ * callback_list_lock / registered_svc_list_lock, and the entries
+ * (including the waitqueues just woken) are freed immediately —
+ * presumably safe only because no other thread can hold this file
+ * open at release time; verify against the wakeup paths.
+ */
+static int tzcom_release(struct inode *inode, struct file *file)
+{
+ struct tzcom_data_t *tzcom_data = file->private_data;
+ struct tzcom_callback_list *lcb, *ncb;
+ struct tzcom_registered_svc_list *lsvc, *nsvc;
+ PDEBUG("In here");
+
+ wake_up_all(&tzcom_data->cont_cmd_wq);
+
+ list_for_each_entry_safe(lcb, ncb,
+ &tzcom_data->callback_list_head, list) {
+ list_del(&lcb->list);
+ kfree(lcb);
+ }
+
+ list_for_each_entry_safe(lsvc, nsvc,
+ &tzcom_data->registered_svc_list_head, list) {
+ wake_up_all(&lsvc->next_cmd_wq);
+ list_del(&lsvc->list);
+ kfree(lsvc);
+ }
+
+ kfree(tzcom_data);
+ return 0;
+}
+
+/* File operations for /dev/tzcom; all commands go through the ioctl
+ * interface — no read/write/mmap. */
+static const struct file_operations tzcom_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tzcom_ioctl,
+ .open = tzcom_open,
+ .release = tzcom_release
+};
+
+/*
+ * Module init: register the char device (region, class, device node,
+ * cdev), then allocate and ioremap the two TZ shared buffers (SB-in,
+ * SB-out) from pmem. PIL image loading is deferred to first open.
+ *
+ * NOTE(review): device_create() returns ERR_PTR on failure, so the
+ * "!class_dev" check never fires — should be IS_ERR(class_dev).
+ *
+ * NOTE(review): the class_create() error is replaced by -ENOMEM
+ * instead of propagating PTR_ERR(driver_class).
+ *
+ * NOTE(review): once cdev_add() has succeeded, the later error paths
+ * jump to class_device_destroy without calling cdev_del() — the cdev
+ * is leaked (tzcom_exit() never deletes it either).
+ *
+ * NOTE(review): IS_ERR() on the s32 pmem_kalloc() result relies on
+ * the physical address never landing in the error-pointer range —
+ * fragile; confirm against the pmem_kalloc contract.
+ */
+static int __init tzcom_init(void)
+{
+ int rc;
+ struct device *class_dev;
+
+ PDEBUG("Hello tzcom");
+
+ rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
+ if (rc < 0) {
+ PERR("alloc_chrdev_region failed %d", rc);
+ return rc;
+ }
+
+ driver_class = class_create(THIS_MODULE, TZCOM_DEV);
+ if (IS_ERR(driver_class)) {
+ rc = -ENOMEM;
+ PERR("class_create failed %d", rc);
+ goto unregister_chrdev_region;
+ }
+
+ class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
+ TZCOM_DEV);
+ if (!class_dev) {
+ PERR("class_device_create failed %d", rc);
+ rc = -ENOMEM;
+ goto class_destroy;
+ }
+
+ cdev_init(&tzcom_cdev, &tzcom_fops);
+ tzcom_cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
+ if (rc < 0) {
+ PERR("cdev_add failed %d", rc);
+ goto class_device_destroy;
+ }
+
+ /* SB-in: kernel -> TZ command/response buffer, 4K-aligned EBI1 pmem */
+ sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
+ PMEM_ALIGNMENT_4K);
+ if (IS_ERR((void *)sb_in_phys)) {
+ PERR("could not allocte in kernel pmem buffers for sb_in");
+ rc = -ENOMEM;
+ goto class_device_destroy;
+ }
+ PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);
+
+ sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
+ sb_in_length);
+ if (!sb_in_virt) {
+ PERR("Shared buffer IN allocation failed.");
+ rc = -ENOMEM;
+ goto class_device_destroy;
+ }
+ PDEBUG("sb_in virt address: %p, phys address: 0x%x",
+ sb_in_virt, tzcom_virt_to_phys(sb_in_virt));
+
+ /* SB-out: TZ -> kernel callback buffer */
+ sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
+ PMEM_ALIGNMENT_4K);
+ if (IS_ERR((void *)sb_out_phys)) {
+ PERR("could not allocte in kernel pmem buffers for sb_out");
+ rc = -ENOMEM;
+ goto class_device_destroy;
+ }
+ PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);
+
+ sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
+ sb_out_length);
+ if (!sb_out_virt) {
+ PERR("Shared buffer OUT allocation failed.");
+ rc = -ENOMEM;
+ goto class_device_destroy;
+ }
+ PDEBUG("sb_out virt address: %p, phys address: 0x%x",
+ sb_out_virt, tzcom_virt_to_phys(sb_out_virt));
+
+ /* Initialized in tzcom_open */
+ pil = NULL;
+
+ return 0;
+
+class_device_destroy:
+ /* partial-allocation cleanup; the NULL/0 guards cover whichever
+ * of the four resources were actually obtained */
+ if (sb_in_virt)
+ iounmap(sb_in_virt);
+ if (sb_in_phys)
+ pmem_kfree(sb_in_phys);
+ if (sb_out_virt)
+ iounmap(sb_out_virt);
+ if (sb_out_phys)
+ pmem_kfree(sb_out_phys);
+ device_destroy(driver_class, tzcom_device_no);
+class_destroy:
+ class_destroy(driver_class);
+unregister_chrdev_region:
+ unregister_chrdev_region(tzcom_device_no, 1);
+ return rc;
+}
+
+/*
+ * Module exit: release shared buffers, drop the PIL reference if open
+ * ever loaded it, and unwind the char device registration.
+ *
+ * NOTE(review): cdev_del(&tzcom_cdev) is never called here (nor on the
+ * init error paths), so the cdev registered by tzcom_init() is leaked.
+ */
+static void __exit tzcom_exit(void)
+{
+ PDEBUG("Goodbye tzcom");
+ if (sb_in_virt)
+ iounmap(sb_in_virt);
+ if (sb_in_phys)
+ pmem_kfree(sb_in_phys);
+ if (sb_out_virt)
+ iounmap(sb_out_virt);
+ if (sb_out_phys)
+ pmem_kfree(sb_out_phys);
+ if (pil != NULL) {
+ pil_put("playrdy");
+ pil = NULL;
+ }
+ device_destroy(driver_class, tzcom_device_no);
+ class_destroy(driver_class);
+ unregister_chrdev_region(tzcom_device_no, 1);
+}
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
+MODULE_VERSION("1.00");
+
+module_init(tzcom_init);
+module_exit(tzcom_exit);
diff --git a/drivers/misc/tzcomi.h b/drivers/misc/tzcomi.h
new file mode 100644
index 0000000..33634cf
--- /dev/null
+++ b/drivers/misc/tzcomi.h
@@ -0,0 +1,112 @@
+/* Qualcomm TrustZone communicator driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TZCOMI_H_
+#define __TZCOMI_H_
+
+#include <linux/types.h>
+
+enum tz_sched_cmd_id {
+ TZ_SCHED_CMD_ID_INVALID = 0,
+ TZ_SCHED_CMD_ID_INIT_SB_OUT, /**< Initialize the shared buffer */
+ TZ_SCHED_CMD_ID_INIT_SB_LOG, /**< Initialize the logging shared buf */
+ TZ_SCHED_CMD_ID_UNKNOWN = 0x7FFFFFFE,
+ TZ_SCHED_CMD_ID_MAX = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_type {
+ TZ_SCHED_CMD_INVALID = 0,
+ TZ_SCHED_CMD_NEW, /** New TZ Scheduler Command */
+ TZ_SCHED_CMD_PENDING, /** Pending cmd...sched will restore stack */
+ TZ_SCHED_CMD_COMPLETE, /** TZ sched command is complete */
+ TZ_SCHED_CMD_MAX = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_status {
+ TZ_SCHED_STATUS_INCOMPLETE = 0,
+ TZ_SCHED_STATUS_COMPLETE,
+ TZ_SCHED_STATUS_MAX = 0x7FFFFFFF
+};
+
+/** Command structure for initializing shared buffers (SB_OUT
+ and SB_LOG)
+*/
+__packed struct tz_pr_init_sb_req_s {
+ /** First 4 bytes should always be command id
+ * from enum tz_sched_cmd_id */
+ uint32_t pr_cmd;
+ /** Pointer to the physical location of sb_out buffer */
+ uint32_t sb_ptr;
+ /** length of shared buffer */
+ uint32_t sb_len;
+};
+
+
+__packed struct tz_pr_init_sb_rsp_s {
+ /** First 4 bytes should always be command id
+ * from enum tz_sched_cmd_id */
+ uint32_t pr_cmd;
+ /** Return code, 0 for success, Approp error code otherwise */
+ int32_t ret;
+};
+
+
+/**
+ * struct tzcom_command - tzcom command buffer
+ * @cmd_type: value from enum tz_sched_cmd_type
+ * @sb_in_cmd_addr: points to physical location of command
+ * buffer
+ * @sb_in_cmd_len: length of command buffer
+ */
+__packed struct tzcom_command {
+ uint32_t cmd_type;
+ uint8_t *sb_in_cmd_addr;
+ uint32_t sb_in_cmd_len;
+};
+
+/**
+ * struct tzcom_response - tzcom response buffer
+ * @cmd_status: value from enum tz_sched_cmd_status
+ * @sb_in_rsp_addr: points to physical location of response
+ * buffer
+ * @sb_in_rsp_len: length of command response
+ */
+__packed struct tzcom_response {
+ uint32_t cmd_status;
+ uint8_t *sb_in_rsp_addr;
+ uint32_t sb_in_rsp_len;
+};
+
+/**
+ * struct tzcom_callback - tzcom callback buffer
+ * @cmd_id: command to run in registered service
+ * @sb_out_cb_data_len: length of the callback payload that
+ * follows this header
+ * @sb_out_cb_data_off: offset of the payload from the start of
+ * this header
+ *
+ * A callback buffer would be laid out in sb_out as follows:
+ *
+ * --------------------- <--- struct tzcom_callback
+ * | callback header |
+ * --------------------- <--- tzcom_callback.sb_out_cb_data_off
+ * | callback data |
+ * ---------------------
+ */
+__packed struct tzcom_callback {
+ uint32_t cmd_id;
+ uint32_t sb_out_cb_data_len;
+ uint32_t sb_out_cb_data_off;
+};
+
+#endif /* __TZCOMI_H_ */