Merge changes I66b4da08,Iba26640f into msm-3.0
* changes:
msm: camera: add reset csid and csiphy
msm: camera: Add csi_lane_mask and csi_lane_assign
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 99b0a8f..75160ef 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -125,6 +125,7 @@
PM8921_GPIO_INPUT(38, PM_GPIO_PULL_UP_1P5),
/* TABLA CODEC RESET */
PM8921_GPIO_OUTPUT(34, 1, MED),
+ PM8921_GPIO_INPUT(17, PM_GPIO_PULL_UP_1P5), /* SD_WP */
};
static struct pm8xxx_gpio_init pm8921_mtp_kp_gpios[] __initdata = {
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index cdafdfc..b8cae49 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -233,6 +233,8 @@
.pclk_src_dfab = 1,
.pin_data = &mmc_slot_pin_data[SDCC3],
.vreg_data = &mmc_slot_vreg_data[SDCC3],
+ .wpswitch_gpio = PM8921_GPIO_PM_TO_SYS(17),
+ .wpswitch_polarity = 1,
.status_gpio = 26,
.status_irq = MSM_GPIO_TO_INT(26),
.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 46c60ff..6a2a6dc 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -1796,7 +1796,7 @@
&msm_etb_device,
&msm_tpiu_device,
&msm_funnel_device,
- &msm_ptm_device,
+ &msm_etm_device,
#endif
&msm_device_dspcrashd_8960,
&msm8960_device_watchdog,
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 4e18f89..3b84875 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -432,6 +432,7 @@
.warm_bat_voltage = 4100,
.thermal_mitigation = pm8921_therm_mitigation,
.thermal_levels = ARRAY_SIZE(pm8921_therm_mitigation),
+ .rconn_mohm = 18,
};
static struct pm8xxx_misc_platform_data pm8xxx_misc_pdata = {
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index de42371..e2ba303 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2183,7 +2183,7 @@
&msm_etb_device,
&msm_tpiu_device,
&msm_funnel_device,
- &msm_ptm_device,
+ &msm_etm_device,
#endif
&msm_device_dspcrashd_8960,
&msm8960_device_watchdog,
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index ec2be96..c91f2b6 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -587,7 +587,8 @@
/* Concurrency 6 */
(DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
- 0, 0, 0, 0,
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ 0, 0, 0,
/* Concurrency 7 */
(DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 98a32c6..69b92af 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -4445,6 +4445,7 @@
static DEFINE_CLK_VOTER(dfab_bam_dmux_clk, &dfab_clk.c);
static DEFINE_CLK_VOTER(dfab_scm_clk, &dfab_clk.c);
static DEFINE_CLK_VOTER(dfab_qseecom_clk, &dfab_clk.c);
+static DEFINE_CLK_VOTER(dfab_tzcom_clk, &dfab_clk.c);
static DEFINE_CLK_VOTER(ebi1_msmbus_clk, &ebi1_clk.c);
static DEFINE_CLK_VOTER(ebi1_adm_clk, &ebi1_clk.c);
@@ -5103,6 +5104,7 @@
CLK_LOOKUP("bus_clk", dfab_bam_dmux_clk.c, "BAM_RMNT"),
CLK_LOOKUP("bus_clk", dfab_scm_clk.c, "scm"),
CLK_LOOKUP("bus_clk", dfab_qseecom_clk.c, "qseecom"),
+ CLK_LOOKUP("bus_clk", dfab_tzcom_clk.c, "tzcom"),
CLK_LOOKUP("alt_core_clk", usb_hsic_xcvr_fs_clk.c, "msm_hsic_host"),
CLK_LOOKUP("phy_clk", usb_hsic_hsic_clk.c, "msm_hsic_host"),
@@ -5411,6 +5413,7 @@
CLK_LOOKUP("bus_clk", dfab_bam_dmux_clk.c, "BAM_RMNT"),
CLK_LOOKUP("bus_clk", dfab_scm_clk.c, "scm"),
CLK_LOOKUP("bus_clk", dfab_qseecom_clk.c, "qseecom"),
+ CLK_LOOKUP("bus_clk", dfab_tzcom_clk.c, "tzcom"),
CLK_LOOKUP("mem_clk", ebi1_adm_clk.c, "msm_dmov"),
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 4c02215..dbf26d9 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3127,7 +3127,7 @@
#define MSM_ETB_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x1000)
#define MSM_TPIU_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x3000)
#define MSM_FUNNEL_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x4000)
-#define MSM_PTM_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x1C000)
+#define MSM_ETM_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x1C000)
static struct resource msm_etb_resources[] = {
{
@@ -3174,19 +3174,19 @@
.resource = msm_funnel_resources,
};
-static struct resource msm_ptm_resources[] = {
+static struct resource msm_etm_resources[] = {
{
- .start = MSM_PTM_PHYS_BASE,
- .end = MSM_PTM_PHYS_BASE + (SZ_4K * 2) - 1,
+ .start = MSM_ETM_PHYS_BASE,
+ .end = MSM_ETM_PHYS_BASE + (SZ_4K * 2) - 1,
.flags = IORESOURCE_MEM,
},
};
-struct platform_device msm_ptm_device = {
- .name = "msm_ptm",
+struct platform_device msm_etm_device = {
+ .name = "msm_etm",
.id = 0,
- .num_resources = ARRAY_SIZE(msm_ptm_resources),
- .resource = msm_ptm_resources,
+ .num_resources = ARRAY_SIZE(msm_etm_resources),
+ .resource = msm_etm_resources,
};
#endif
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index beb0c55..b879d8b 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -645,16 +645,18 @@
/* Command sequence for simple WFI */
static uint8_t spm_wfi_cmd_sequence[] __initdata = {
- 0x00, 0x40, 0x40, 0x03,
- 0x00, 0x40, 0x40, 0x0f,
+ 0x04, 0x03, 0x04, 0x0f,
};
/* Command sequence for GDFS, this won't send any interrupt to the modem */
static uint8_t spm_pc_without_modem[] __initdata = {
0x20, 0x00, 0x30, 0x10,
- 0x40, 0x40, 0x03, 0x10,
- 0x00, 0x30, 0x2E, 0x40,
- 0x40, 0x0f,
+ 0x03, 0x1e, 0x0e, 0x3e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x2E, 0x0f,
};
static struct msm_spm_seq_entry msm_spm_seq_list[] __initdata = {
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index ab55a25..0a14db0 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -316,7 +316,7 @@
extern struct platform_device msm_etb_device;
extern struct platform_device msm_tpiu_device;
extern struct platform_device msm_funnel_device;
-extern struct platform_device msm_ptm_device;
+extern struct platform_device msm_etm_device;
#endif
extern struct platform_device msm_bus_8064_apps_fabric;
diff --git a/arch/arm/mach-msm/qdsp5/Makefile b/arch/arm/mach-msm/qdsp5/Makefile
index a4a43ed..2ce0031 100644
--- a/arch/arm/mach-msm/qdsp5/Makefile
+++ b/arch/arm/mach-msm/qdsp5/Makefile
@@ -17,3 +17,4 @@
obj-y += snd.o snd_adie.o
obj-$(CONFIG_ARCH_MSM7X27A) += audio_fm.o
obj-$(CONFIG_ARCH_MSM7X27A) += audio_mvs.o
+obj-$(CONFIG_ARCH_MSM7X27A) += audio_lpa.o
diff --git a/arch/arm/mach-msm/qdsp5/audio_lpa.c b/arch/arm/mach-msm/qdsp5/audio_lpa.c
new file mode 100644
index 0000000..dab53dc
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp5/audio_lpa.c
@@ -0,0 +1,1485 @@
+
+/* audio_lpa.c - low power audio driver
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * Based on the PCM decoder driver in arch/arm/mach-msm/qdsp5/audio_pcm.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/list.h>
+#include <linux/android_pmem.h>
+#include <linux/slab.h>
+#include <linux/msm_audio.h>
+
+
+#include <mach/msm_adsp.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+#include <mach/msm_subsystem_map.h>
+#include <mach/qdsp5/qdsp5audppcmdi.h>
+#include <mach/qdsp5/qdsp5audppmsg.h>
+#include <mach/qdsp5/qdsp5audplaycmdi.h>
+#include <mach/qdsp5/qdsp5audplaymsg.h>
+#include <mach/qdsp5/qdsp5rmtcmdi.h>
+#include <mach/debug_mm.h>
+#include <linux/memory_alloc.h>
+#include <mach/msm_memtypes.h>
+
+#include "audmgr.h"
+
+/* for queue ids - should be relative to module number*/
+#include "adsp.h"
+
+#define ADRV_STATUS_AIO_INTF 0x00000001
+#define ADRV_STATUS_OBUF_GIVEN 0x00000002
+#define ADRV_STATUS_IBUF_GIVEN 0x00000004
+#define ADRV_STATUS_FSYNC 0x00000008
+
+#define MSM_MAX_VOLUME 0x2000
+/* 17 added to avoid more deviation */
+#define MSM_VOLUME_STEP (MSM_MAX_VOLUME+17)
+#define MSM_VOLUME_FACTOR (10000)
+
+/* Size must be power of 2 */
+#define MAX_BUF 2
+#define BUFSZ (524288)
+
+#define AUDDEC_DEC_PCM 0
+
+/* Decoder status received from AUDPPTASK */
+#define AUDPP_DEC_STATUS_SLEEP 0
+#define AUDPP_DEC_STATUS_INIT 1
+#define AUDPP_DEC_STATUS_CFG 2
+#define AUDPP_DEC_STATUS_PLAY 3
+
+#define AUDPCM_EVENT_NUM 10 /* Default number of pre-allocated event packets */
+
+#define __CONTAINS(r, v, l) ({ \
+ typeof(r) __r = r; \
+ typeof(v) __v = v; \
+ typeof(v) __e = __v + l; \
+ int res = ((__v >= __r->vaddr) && \
+ (__e <= __r->vaddr + __r->len)); \
+ res; \
+})
+
+#define CONTAINS(r1, r2) ({ \
+ typeof(r2) __r2 = r2; \
+ __CONTAINS(r1, __r2->vaddr, __r2->len); \
+})
+
+#define IN_RANGE(r, v) ({ \
+ typeof(r) __r = r; \
+ typeof(v) __vv = v; \
+ int res = ((__vv >= __r->vaddr) && \
+ (__vv < (__r->vaddr + __r->len))); \
+ res; \
+})
+
+#define OVERLAPS(r1, r2) ({ \
+ typeof(r1) __r1 = r1; \
+ typeof(r2) __r2 = r2; \
+ typeof(__r2->vaddr) __v = __r2->vaddr; \
+ typeof(__v) __e = __v + __r2->len - 1; \
+ int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \
+ res; \
+})
+
+struct audio;
+
+struct buffer {
+ void *data;
+ unsigned size;
+ unsigned used; /* Input usage actual DSP produced PCM size */
+ unsigned addr;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+struct audpcm_suspend_ctl {
+struct early_suspend node;
+struct audio *audio;
+};
+#endif
+
+struct audpcm_event {
+ struct list_head list;
+ int event_type;
+ union msm_audio_event_payload payload;
+};
+
+struct audlpa_pmem_region {
+ struct list_head list;
+ struct file *file;
+ int fd;
+ void *vaddr;
+ unsigned long paddr;
+ unsigned long kvaddr;
+ unsigned long len;
+ unsigned ref_cnt;
+};
+
+struct audpcm_buffer_node {
+ struct list_head list;
+ struct msm_audio_aio_buf buf;
+ unsigned long paddr;
+};
+
+struct audio {
+ struct buffer out[2];
+
+ spinlock_t dsp_lock;
+
+ uint8_t out_head;
+ uint8_t out_tail;
+ uint8_t out_needed; /* number of buffers the dsp is waiting for */
+ struct list_head out_queue; /* queue to retain output buffers */
+ atomic_t out_bytes;
+
+ struct mutex lock;
+ struct mutex write_lock;
+ wait_queue_head_t write_wait;
+
+ struct msm_adsp_module *audplay;
+
+ /* configuration to use on next enable */
+ uint32_t out_sample_rate;
+ uint32_t out_channel_mode;
+ uint32_t out_bits; /* bits per sample */
+
+ struct audmgr audmgr;
+
+ /* data allocated for various buffers */
+ char *data;
+ int32_t phys;
+ struct msm_mapped_buffer *map_v_write;
+
+ uint32_t drv_status;
+ int wflush; /* Write flush */
+ int opened;
+ int enabled;
+ int running;
+ int stopped; /* set when stopped, cleared on flush */
+ int teos; /* valid only if tunnel mode & no data left for decoder */
+ int rmt_resource_released;
+ enum msm_aud_decoder_state dec_state; /* Represents decoder state */
+ int reserved; /* A byte is being reserved */
+ char rsv_byte; /* Handle odd length user data */
+
+ const char *module_name;
+ unsigned queue_id;
+
+ unsigned long volume;
+
+ uint16_t dec_id;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct audpcm_suspend_ctl suspend_ctl;
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
+ wait_queue_head_t wait;
+ struct list_head free_event_queue;
+ struct list_head event_queue;
+ wait_queue_head_t event_wait;
+ spinlock_t event_queue_lock;
+ struct mutex get_event_lock;
+ int event_abort;
+
+ struct list_head pmem_region_queue;
+ int buffer_count;
+ int buffer_size;
+};
+
+static int auddec_dsp_config(struct audio *audio, int enable);
+static void audpp_cmd_cfg_adec_params(struct audio *audio);
+static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
+static void audpcm_post_event(struct audio *audio, int type,
+ union msm_audio_event_payload payload);
+static unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr,
+ unsigned long len, int ref_up);
+static void audpcm_async_send_data(struct audio *audio,
+ unsigned needed);
+
+
+static int rmt_put_resource(struct audio *audio)
+{
+ struct aud_codec_config_cmd cmd;
+ unsigned short client_idx;
+
+ cmd.cmd_id = RM_CMD_AUD_CODEC_CFG;
+ cmd.client_id = RM_AUD_CLIENT_ID;
+ cmd.task_id = audio->dec_id;
+ cmd.enable = RMT_DISABLE;
+ cmd.dec_type = AUDDEC_DEC_PCM;
+ client_idx = ((cmd.client_id << 8) | cmd.task_id);
+
+ return put_adsp_resource(client_idx, &cmd, sizeof(cmd));
+}
+
+static int rmt_get_resource(struct audio *audio)
+{
+ struct aud_codec_config_cmd cmd;
+ unsigned short client_idx;
+
+ cmd.cmd_id = RM_CMD_AUD_CODEC_CFG;
+ cmd.client_id = RM_AUD_CLIENT_ID;
+ cmd.task_id = audio->dec_id;
+ cmd.enable = RMT_ENABLE;
+ cmd.dec_type = AUDDEC_DEC_PCM;
+ client_idx = ((cmd.client_id << 8) | cmd.task_id);
+
+ return get_adsp_resource(client_idx, &cmd, sizeof(cmd));
+}
+
+/* must be called with audio->lock held */
+static int audio_enable(struct audio *audio)
+{
+ struct audmgr_config cfg;
+ int rc;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ if (audio->enabled)
+ return 0;
+
+ if (audio->rmt_resource_released == 1) {
+ audio->rmt_resource_released = 0;
+ rc = rmt_get_resource(audio);
+ if (rc)
+ MM_ERR("ADSP resources are not available");
+ }
+
+ audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
+ audio->out_tail = 0;
+ audio->out_needed = 0;
+
+ cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
+ cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000;
+ cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK;
+ cfg.codec = RPC_AUD_DEF_CODEC_PCM;
+ cfg.snd_method = RPC_SND_METHOD_MIDI;
+
+ rc = audmgr_enable(&audio->audmgr, &cfg);
+ if (rc < 0)
+ return rc;
+
+ if (msm_adsp_enable(audio->audplay)) {
+ MM_ERR("msm_adsp_enable(audplay) failed\n");
+ audmgr_disable(&audio->audmgr);
+ return -ENODEV;
+ }
+
+ if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) {
+ MM_ERR("audpp_enable() failed\n");
+ msm_adsp_disable(audio->audplay);
+ audmgr_disable(&audio->audmgr);
+ return -ENODEV;
+ }
+
+ audio->enabled = 1;
+ return 0;
+}
+
+/* must be called with audio->lock held */
+static int audio_disable(struct audio *audio)
+{
+ int rc = 0;
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ if (audio->enabled) {
+ audio->enabled = 0;
+ audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
+ auddec_dsp_config(audio, 0);
+ rc = wait_event_interruptible_timeout(audio->wait,
+ audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
+ msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE)
+ rc = -EFAULT;
+ else
+ rc = 0;
+ audio->stopped = 1;
+ wake_up(&audio->write_wait);
+ msm_adsp_disable(audio->audplay);
+ audpp_disable(audio->dec_id, audio);
+ audmgr_disable(&audio->audmgr);
+ audio->out_needed = 0;
+ rmt_put_resource(audio);
+ audio->rmt_resource_released = 1;
+ }
+ return rc;
+}
+
+/* ------------------- dsp --------------------- */
+static void audplay_dsp_event(void *data, unsigned id, size_t len,
+ void (*getevent) (void *ptr, size_t len))
+{
+ struct audio *audio = data;
+ uint32_t msg[28];
+ getevent(msg, sizeof(msg));
+
+ MM_DBG("msg_id=%x\n", id);
+
+ switch (id) {
+ case AUDPLAY_MSG_DEC_NEEDS_DATA:
+ audpcm_async_send_data(audio, 1);
+ break;
+ case ADSP_MESSAGE_ID:
+ MM_DBG("Received ADSP event: module enable(audplaytask)\n");
+ break;
+ default:
+ MM_ERR("unexpected message from decoder\n");
+ break;
+ }
+}
+
+static void audio_dsp_event(void *private, unsigned id, uint16_t *msg)
+{
+ struct audio *audio = private;
+
+ switch (id) {
+ case AUDPP_MSG_STATUS_MSG:{
+ unsigned status = msg[1];
+
+ switch (status) {
+ case AUDPP_DEC_STATUS_SLEEP: {
+ uint16_t reason = msg[2];
+ MM_DBG("decoder status: sleep reason =0x%04x\n",
+ reason);
+ if ((reason == AUDPP_MSG_REASON_MEM)
+ || (reason ==
+ AUDPP_MSG_REASON_NODECODER)) {
+ audio->dec_state =
+ MSM_AUD_DECODER_STATE_FAILURE;
+ wake_up(&audio->wait);
+ } else if (reason == AUDPP_MSG_REASON_NONE) {
+ /* decoder is in disable state */
+ audio->dec_state =
+ MSM_AUD_DECODER_STATE_CLOSE;
+ wake_up(&audio->wait);
+ }
+ break;
+ }
+ case AUDPP_DEC_STATUS_INIT:
+ MM_DBG("decoder status: init\n");
+ audpp_cmd_cfg_adec_params(audio);
+ break;
+
+ case AUDPP_DEC_STATUS_CFG:
+ MM_DBG("decoder status: cfg\n");
+ break;
+ case AUDPP_DEC_STATUS_PLAY:
+ MM_DBG("decoder status: play\n");
+ audio->dec_state =
+ MSM_AUD_DECODER_STATE_SUCCESS;
+ wake_up(&audio->wait);
+ break;
+ default:
+ MM_ERR("unknown decoder status\n");
+ break;
+ }
+ break;
+ }
+ case AUDPP_MSG_CFG_MSG:
+ if (msg[0] == AUDPP_MSG_ENA_ENA) {
+ MM_DBG("CFG_MSG ENABLE\n");
+ auddec_dsp_config(audio, 1);
+ audio->out_needed = 0;
+ audio->running = 1;
+ audpp_set_volume_and_pan(audio->dec_id, audio->volume,
+ 0);
+ } else if (msg[0] == AUDPP_MSG_ENA_DIS) {
+ MM_DBG("CFG_MSG DISABLE\n");
+ audio->running = 0;
+ } else {
+ MM_ERR("CFG_MSG %d?\n", msg[0]);
+ }
+ break;
+ case AUDPP_MSG_FLUSH_ACK:
+ MM_DBG("FLUSH_ACK\n");
+ audio->wflush = 0;
+ wake_up(&audio->write_wait);
+ break;
+
+ case AUDPP_MSG_PCMDMAMISSED:
+ MM_DBG("PCMDMAMISSED\n");
+ audio->teos = 1;
+ wake_up(&audio->write_wait);
+ break;
+
+ default:
+ MM_ERR("UNKNOWN (%d)\n", id);
+ }
+
+}
+
+
+struct msm_adsp_ops audlpadec_adsp_ops = {
+ .event = audplay_dsp_event,
+};
+
+
+#define audplay_send_queue0(audio, cmd, len) \
+ msm_adsp_write(audio->audplay, audio->queue_id, \
+ cmd, len)
+
+static int auddec_dsp_config(struct audio *audio, int enable)
+{
+ u16 cfg_dec_cmd[AUDPP_CMD_CFG_DEC_TYPE_LEN / sizeof(unsigned short)];
+
+ memset(cfg_dec_cmd, 0, sizeof(cfg_dec_cmd));
+
+ cfg_dec_cmd[0] = AUDPP_CMD_CFG_DEC_TYPE;
+ if (enable)
+ cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC |
+ AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_PCM;
+ else
+ cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC |
+ AUDPP_CMD_DIS_DEC_V;
+
+ return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd));
+}
+
+static void audpp_cmd_cfg_adec_params(struct audio *audio)
+{
+ audpp_cmd_cfg_adec_params_wav cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
+ cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_WAV_LEN;
+ cmd.common.dec_id = audio->dec_id;
+ cmd.common.input_sampling_frequency = audio->out_sample_rate;
+ cmd.stereo_cfg = audio->out_channel_mode;
+ cmd.pcm_width = audio->out_bits;
+ cmd.sign = 0;
+ audpp_send_queue2(&cmd, sizeof(cmd));
+}
+static void audpcm_async_send_data(struct audio *audio, unsigned needed)
+{
+ unsigned long flags;
+
+ if (!audio->running)
+ return;
+
+ spin_lock_irqsave(&audio->dsp_lock, flags);
+
+ if (needed && !audio->wflush) {
+ audio->out_needed = 1;
+ if (audio->drv_status & ADRV_STATUS_OBUF_GIVEN) {
+ /* pop one node out of queue */
+ union msm_audio_event_payload payload;
+ struct audpcm_buffer_node *used_buf;
+
+ MM_DBG("consumed\n");
+
+ BUG_ON(list_empty(&audio->out_queue));
+ used_buf = list_first_entry(&audio->out_queue,
+ struct audpcm_buffer_node, list);
+ list_del(&used_buf->list);
+ payload.aio_buf = used_buf->buf;
+ audpcm_post_event(audio, AUDIO_EVENT_WRITE_DONE,
+ payload);
+ kfree(used_buf);
+ audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
+ }
+ }
+ if (audio->out_needed) {
+ struct audpcm_buffer_node *next_buf;
+ audplay_cmd_bitstream_data_avail cmd;
+ if (!list_empty(&audio->out_queue)) {
+ next_buf = list_first_entry(&audio->out_queue,
+ struct audpcm_buffer_node, list);
+ MM_DBG("next_buf %p\n", next_buf);
+ if (next_buf) {
+ MM_DBG("next buf phy %lx len %d\n",
+ next_buf->paddr, next_buf->buf.data_len);
+
+ cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL;
+ if (next_buf->buf.data_len)
+ cmd.decoder_id = audio->dec_id;
+ else {
+ cmd.decoder_id = -1;
+ MM_DBG("input EOS signaled\n");
+ }
+ cmd.buf_ptr = (unsigned) next_buf->paddr;
+ cmd.buf_size = next_buf->buf.data_len >> 1;
+ cmd.partition_number = 0;
+ /* complete writes to the input buffer */
+ wmb();
+ audplay_send_queue0(audio, &cmd, sizeof(cmd));
+ audio->out_needed = 0;
+ audio->drv_status |= ADRV_STATUS_OBUF_GIVEN;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+}
+
+/* ------------------- device --------------------- */
+static void audpcm_async_flush(struct audio *audio)
+{
+ struct audpcm_buffer_node *buf_node;
+ struct list_head *ptr, *next;
+ union msm_audio_event_payload payload;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ list_for_each_safe(ptr, next, &audio->out_queue) {
+ buf_node = list_entry(ptr, struct audpcm_buffer_node, list);
+ list_del(&buf_node->list);
+ payload.aio_buf = buf_node->buf;
+ audpcm_post_event(audio, AUDIO_EVENT_WRITE_DONE,
+ payload);
+ kfree(buf_node);
+ }
+ audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
+ audio->out_needed = 0;
+ atomic_set(&audio->out_bytes, 0);
+}
+static void audio_ioport_reset(struct audio *audio)
+{
+ if (audio->drv_status & ADRV_STATUS_AIO_INTF) {
+ /* If fsync is in progress, make sure
+ * return value of fsync indicates
+ * abort due to flush
+ */
+ if (audio->drv_status & ADRV_STATUS_FSYNC) {
+ MM_DBG("fsync in progress\n");
+ wake_up(&audio->write_wait);
+ mutex_lock(&audio->write_lock);
+ audpcm_async_flush(audio);
+ mutex_unlock(&audio->write_lock);
+ } else
+ audpcm_async_flush(audio);
+ } else {
+ /* Make sure read/write thread are free from
+ * sleep and knowing that system is not able
+ * to process io request at the moment
+ */
+ wake_up(&audio->write_wait);
+ mutex_lock(&audio->write_lock);
+ audpcm_async_flush(audio);
+ mutex_unlock(&audio->write_lock);
+ }
+}
+
+static int audpcm_events_pending(struct audio *audio)
+{
+ unsigned long flags;
+ int empty;
+
+ spin_lock_irqsave(&audio->event_queue_lock, flags);
+ empty = !list_empty(&audio->event_queue);
+ spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+ return empty || audio->event_abort;
+}
+
+static void audpcm_reset_event_queue(struct audio *audio)
+{
+ unsigned long flags;
+ struct audpcm_event *drv_evt;
+ struct list_head *ptr, *next;
+
+ spin_lock_irqsave(&audio->event_queue_lock, flags);
+ list_for_each_safe(ptr, next, &audio->event_queue) {
+ drv_evt = list_first_entry(&audio->event_queue,
+ struct audpcm_event, list);
+ list_del(&drv_evt->list);
+ kfree(drv_evt);
+ }
+ list_for_each_safe(ptr, next, &audio->free_event_queue) {
+ drv_evt = list_first_entry(&audio->free_event_queue,
+ struct audpcm_event, list);
+ list_del(&drv_evt->list);
+ kfree(drv_evt);
+ }
+ spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+
+ return;
+}
+
+static long audpcm_process_event_req(struct audio *audio, void __user *arg)
+{
+ long rc;
+ struct msm_audio_event usr_evt;
+ struct audpcm_event *drv_evt = NULL;
+ int timeout;
+ unsigned long flags;
+
+ if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
+ return -EFAULT;
+
+ timeout = (int) usr_evt.timeout_ms;
+
+ if (timeout > 0) {
+ rc = wait_event_interruptible_timeout(
+ audio->event_wait, audpcm_events_pending(audio),
+ msecs_to_jiffies(timeout));
+ if (rc == 0)
+ return -ETIMEDOUT;
+ } else {
+ rc = wait_event_interruptible(
+ audio->event_wait, audpcm_events_pending(audio));
+ }
+
+ if (rc < 0)
+ return rc;
+
+ if (audio->event_abort) {
+ audio->event_abort = 0;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&audio->event_queue_lock, flags);
+ if (!list_empty(&audio->event_queue)) {
+ drv_evt = list_first_entry(&audio->event_queue,
+ struct audpcm_event, list);
+ list_del(&drv_evt->list);
+ }
+ if (drv_evt) {
+ usr_evt.event_type = drv_evt->event_type;
+ usr_evt.event_payload = drv_evt->payload;
+ list_add_tail(&drv_evt->list, &audio->free_event_queue);
+ } else
+ rc = -1;
+ spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+
+ if (drv_evt && drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) {
+ mutex_lock(&audio->lock);
+ audlpa_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
+ drv_evt->payload.aio_buf.buf_len, 0);
+ mutex_unlock(&audio->lock);
+ }
+ if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+static int audlpa_pmem_check(struct audio *audio,
+ void *vaddr, unsigned long len)
+{
+ struct audlpa_pmem_region *region_elt;
+ struct audlpa_pmem_region t = { .vaddr = vaddr, .len = len };
+
+ list_for_each_entry(region_elt, &audio->pmem_region_queue, list) {
+ if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
+ OVERLAPS(region_elt, &t)) {
+ MM_ERR("region (vaddr %p len %ld)"
+ " clashes with registered region"
+ " (vaddr %p paddr %p len %ld)\n",
+ vaddr, len,
+ region_elt->vaddr,
+ (void *)region_elt->paddr,
+ region_elt->len);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int audlpa_pmem_add(struct audio *audio,
+ struct msm_audio_pmem_info *info)
+{
+ unsigned long paddr, kvaddr, len;
+ struct file *file;
+ struct audlpa_pmem_region *region;
+ int rc = -EINVAL;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+
+ if (!region) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
+ kfree(region);
+ goto end;
+ }
+
+ rc = audlpa_pmem_check(audio, info->vaddr, len);
+ if (rc < 0) {
+ put_pmem_file(file);
+ kfree(region);
+ goto end;
+ }
+
+ region->vaddr = info->vaddr;
+ region->fd = info->fd;
+ region->paddr = paddr;
+ region->kvaddr = kvaddr;
+ region->len = len;
+ region->file = file;
+ region->ref_cnt = 0;
+ MM_DBG("add region paddr %lx vaddr %p, len %lu\n", region->paddr,
+ region->vaddr, region->len);
+	list_add_tail(&region->list, &audio->pmem_region_queue);
+end:
+ return rc;
+}
+
+static int audlpa_pmem_remove(struct audio *audio,
+ struct msm_audio_pmem_info *info)
+{
+ struct audlpa_pmem_region *region;
+ struct list_head *ptr, *next;
+ int rc = -EINVAL;
+
+ MM_DBG("info fd %d vaddr %p\n", info->fd, info->vaddr);
+
+ list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
+ region = list_entry(ptr, struct audlpa_pmem_region, list);
+
+ if ((region->fd == info->fd) &&
+ (region->vaddr == info->vaddr)) {
+ if (region->ref_cnt) {
+ MM_DBG("region %p in use ref_cnt %d\n",
+ region, region->ref_cnt);
+ break;
+ }
+ MM_DBG("remove region fd %d vaddr %p\n",
+ info->fd, info->vaddr);
+			list_del(&region->list);
+ put_pmem_file(region->file);
+ kfree(region);
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int audlpa_pmem_lookup_vaddr(struct audio *audio, void *addr,
+ unsigned long len, struct audlpa_pmem_region **region)
+{
+ struct audlpa_pmem_region *region_elt;
+
+ int match_count = 0;
+
+ *region = NULL;
+
+ /* returns physical address or zero */
+ list_for_each_entry(region_elt, &audio->pmem_region_queue,
+ list) {
+ if (addr >= region_elt->vaddr &&
+ addr < region_elt->vaddr + region_elt->len &&
+ addr + len <= region_elt->vaddr + region_elt->len) {
+ /* offset since we could pass vaddr inside a registerd
+ * pmem buffer
+ */
+
+ match_count++;
+ if (!*region)
+ *region = region_elt;
+ }
+ }
+
+ if (match_count > 1) {
+ MM_ERR("multiple hits for vaddr %p, len %ld\n", addr, len);
+ list_for_each_entry(region_elt,
+ &audio->pmem_region_queue, list) {
+ if (addr >= region_elt->vaddr &&
+ addr < region_elt->vaddr + region_elt->len &&
+ addr + len <= region_elt->vaddr + region_elt->len)
+ MM_ERR("\t%p, %ld --> %p\n", region_elt->vaddr,
+ region_elt->len,
+ (void *)region_elt->paddr);
+ }
+ }
+
+ return *region ? 0 : -1;
+}
+
+unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr,
+ unsigned long len, int ref_up)
+{
+ struct audlpa_pmem_region *region;
+ unsigned long paddr;
+ int ret;
+
+	ret = audlpa_pmem_lookup_vaddr(audio, addr, len, &region);
+ if (ret) {
+ MM_ERR("lookup (%p, %ld) failed\n", addr, len);
+ return 0;
+ }
+ if (ref_up)
+ region->ref_cnt++;
+ else
+ region->ref_cnt--;
+ MM_DBG("found region %p ref_cnt %d\n", region, region->ref_cnt);
+ paddr = region->paddr + (addr - region->vaddr);
+ return paddr;
+}
+
+/* audio -> lock must be held at this point */
+static int audlpa_aio_buf_add(struct audio *audio, unsigned dir,
+ void __user *arg)
+{
+ unsigned long flags;
+ struct audpcm_buffer_node *buf_node;
+
+ buf_node = kmalloc(sizeof(*buf_node), GFP_KERNEL);
+
+ if (!buf_node)
+ return -ENOMEM;
+
+ if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) {
+ kfree(buf_node);
+ return -EFAULT;
+ }
+
+ MM_DBG("node %p dir %x buf_addr %p buf_len %d data_len"
+ "%d\n", buf_node, dir,
+ buf_node->buf.buf_addr, buf_node->buf.buf_len,
+ buf_node->buf.data_len);
+
+ buf_node->paddr = audlpa_pmem_fixup(
+ audio, buf_node->buf.buf_addr,
+ buf_node->buf.buf_len, 1);
+
+ if (dir) {
+ /* write */
+ if (!buf_node->paddr ||
+ (buf_node->paddr & 0x1) ||
+ (buf_node->buf.data_len & 0x1)) {
+ kfree(buf_node);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&audio->dsp_lock, flags);
+ list_add_tail(&buf_node->list, &audio->out_queue);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ audpcm_async_send_data(audio, 0);
+ }
+ MM_DBG("Add buf_node %p paddr %lx\n", buf_node, buf_node->paddr);
+
+ return 0;
+}
+
+/*
+ * ioctl handler for the low-power-audio PCM decoder device.
+ *
+ * AUDIO_GET_STATS, AUDIO_SET_VOLUME, AUDIO_GET_EVENT and
+ * AUDIO_ABORT_GET_EVENT are handled before audio->lock is taken:
+ * the first two only need dsp_lock, and the event calls must not
+ * hold audio->lock while they sleep waiting for an event.  All
+ * remaining commands run with audio->lock held.
+ */
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct audio *audio = file->private_data;
+ int rc = 0;
+
+ MM_DBG("cmd = %d\n", cmd);
+
+ if (cmd == AUDIO_GET_STATS) {
+ struct msm_audio_stats stats;
+ /* AV-sync counters come straight from the AUDPP bookkeeping */
+ stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
+ stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
+ if (copy_to_user((void *) arg, &stats, sizeof(stats)))
+ return -EFAULT;
+ return 0;
+ }
+ if (cmd == AUDIO_SET_VOLUME) {
+ unsigned long flags;
+ spin_lock_irqsave(&audio->dsp_lock, flags);
+
+ /* scale the user-supplied value into DSP volume units,
+ * clamped at MSM_MAX_VOLUME */
+ audio->volume = MSM_VOLUME_STEP * arg;
+ audio->volume /= MSM_VOLUME_FACTOR;
+
+ if (audio->volume > MSM_MAX_VOLUME)
+ audio->volume = MSM_MAX_VOLUME;
+
+ /* apply immediately only if the decoder is already running;
+ * otherwise the cached value is used at start time */
+ if (audio->running)
+ audpp_set_volume_and_pan(audio->dec_id,
+ audio->volume, 0);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ return 0;
+ }
+ if (cmd == AUDIO_GET_EVENT) {
+ MM_DBG("AUDIO_GET_EVENT\n");
+ /* trylock: only one reader may block waiting for events */
+ if (mutex_trylock(&audio->get_event_lock)) {
+ rc = audpcm_process_event_req(audio,
+ (void __user *) arg);
+ mutex_unlock(&audio->get_event_lock);
+ } else
+ rc = -EBUSY;
+ return rc;
+ }
+
+ if (cmd == AUDIO_ABORT_GET_EVENT) {
+ /* wake a blocked AUDIO_GET_EVENT reader so it can bail out */
+ audio->event_abort = 1;
+ wake_up(&audio->event_wait);
+ return 0;
+ }
+
+ mutex_lock(&audio->lock);
+ switch (cmd) {
+ case AUDIO_START:
+ MM_DBG("AUDIO_START\n");
+ rc = audio_enable(audio);
+ if (!rc) {
+ /* wait (bounded) for the DSP to report decoder init */
+ rc = wait_event_interruptible_timeout(audio->wait,
+ audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
+ msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
+ MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
+
+ if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS)
+ rc = -ENODEV;
+ else
+ rc = 0;
+ }
+ break;
+ case AUDIO_STOP:
+ MM_DBG("AUDIO_STOP\n");
+ rc = audio_disable(audio);
+ audio_ioport_reset(audio);
+ /* NOTE(review): stopped is cleared (not set) here, presumably
+ * so a subsequent AUDIO_START can proceed -- confirm intended */
+ audio->stopped = 0;
+ break;
+ case AUDIO_FLUSH:
+ MM_DBG("AUDIO_FLUSH\n");
+ audio->wflush = 1;
+ audio_ioport_reset(audio);
+ if (audio->running) {
+ audpp_flush(audio->dec_id);
+ /* wflush is cleared elsewhere once the flush completes */
+ rc = wait_event_interruptible(audio->write_wait,
+ !audio->wflush);
+ if (rc < 0) {
+ MM_ERR("AUDIO_FLUSH interrupted\n");
+ rc = -EINTR;
+ }
+ } else {
+ /* not running: nothing in flight, clear the flag here */
+ audio->wflush = 0;
+ }
+ break;
+
+ case AUDIO_SET_CONFIG: {
+ struct msm_audio_config config;
+ if (copy_from_user(&config, (void *) arg, sizeof(config))) {
+ rc = -EFAULT;
+ break;
+ }
+ /* translate user-facing channel count and bit width into
+ * the AUDPP command encodings; anything else is rejected */
+ if (config.channel_count == 1) {
+ config.channel_count = AUDPP_CMD_PCM_INTF_MONO_V;
+ } else if (config.channel_count == 2) {
+ config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V;
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ if (config.bits == 8)
+ config.bits = AUDPP_CMD_WAV_PCM_WIDTH_8;
+ else if (config.bits == 16)
+ config.bits = AUDPP_CMD_WAV_PCM_WIDTH_16;
+ else {
+ rc = -EINVAL;
+ break;
+ }
+ /* NOTE(review): sample_rate/buffer sizes are stored without
+ * range validation */
+ audio->out_sample_rate = config.sample_rate;
+ audio->out_channel_mode = config.channel_count;
+ audio->out_bits = config.bits;
+ audio->buffer_count = config.buffer_count;
+ audio->buffer_size = config.buffer_size;
+ MM_DBG("AUDIO_SET_CONFIG\n");
+ break;
+ }
+ case AUDIO_GET_CONFIG: {
+ struct msm_audio_config config;
+ /* translate the AUDPP encodings back into user-facing values */
+ config.buffer_count = audio->buffer_count;
+ config.buffer_size = audio->buffer_size;
+ config.sample_rate = audio->out_sample_rate;
+ if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V)
+ config.channel_count = 1;
+ else
+ config.channel_count = 2;
+ if (audio->out_bits == AUDPP_CMD_WAV_PCM_WIDTH_8)
+ config.bits = 8;
+ else if (audio->out_bits == AUDPP_CMD_WAV_PCM_WIDTH_16)
+ config.bits = 16;
+ else
+ config.bits = 16;
+ config.unused[0] = 0;
+ config.unused[1] = 0;
+
+ if (copy_to_user((void *) arg, &config, sizeof(config)))
+ rc = -EFAULT;
+ else
+ rc = 0;
+ MM_DBG("AUDIO_GET_CONFIG\n");
+ break;
+ }
+
+
+ case AUDIO_PAUSE:
+ MM_DBG("AUDIO_PAUSE %ld\n", arg);
+ rc = audpp_pause(audio->dec_id, (int) arg);
+ break;
+
+ case AUDIO_REGISTER_PMEM: {
+ struct msm_audio_pmem_info info;
+ MM_DBG("AUDIO_REGISTER_PMEM\n");
+ if (copy_from_user(&info, (void *) arg, sizeof(info)))
+ rc = -EFAULT;
+ else
+ rc = audlpa_pmem_add(audio, &info);
+ break;
+ }
+
+ case AUDIO_DEREGISTER_PMEM: {
+ struct msm_audio_pmem_info info;
+ MM_DBG("AUDIO_DEREGISTER_PMEM\n");
+ if (copy_from_user(&info, (void *) arg, sizeof(info)))
+ rc = -EFAULT;
+ else
+ rc = audlpa_pmem_remove(audio, &info);
+ break;
+ }
+
+ case AUDIO_ASYNC_WRITE:
+ /* new writes are rejected while an fsync drains the queue */
+ if (audio->drv_status & ADRV_STATUS_FSYNC)
+ rc = -EBUSY;
+ else
+ rc = audlpa_aio_buf_add(audio, 1, (void __user *) arg);
+ break;
+
+ case AUDIO_ASYNC_READ:
+ MM_ERR("AUDIO_ASYNC_READ not supported\n");
+ rc = -EPERM;
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+ mutex_unlock(&audio->lock);
+ return rc;
+}
+
+/* Only useful in tunnel-mode */
+/*
+ * fsync for the async (AIO) write path: block until the decoder has
+ * signalled end-of-stream (teos), is asking for more data
+ * (out_needed) and the pending write queue is empty.
+ * ADRV_STATUS_FSYNC is set first so AUDIO_ASYNC_WRITE rejects new
+ * buffers while the queue drains.
+ */
+int audlpa_async_fsync(struct audio *audio)
+{
+ int rc = 0;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+
+ /* Blocking client sends more data */
+ mutex_lock(&audio->lock);
+ audio->drv_status |= ADRV_STATUS_FSYNC;
+ mutex_unlock(&audio->lock);
+
+ mutex_lock(&audio->write_lock);
+ /* pcm dmamiss message is sent continously
+ * when decoder is starved so no race
+ * condition concern
+ */
+ audio->teos = 0;
+
+ rc = wait_event_interruptible(audio->write_wait,
+ (audio->teos && audio->out_needed &&
+ list_empty(&audio->out_queue))
+ || audio->wflush || audio->stopped);
+
+ /* a concurrent stop or flush aborts the sync */
+ if (audio->stopped || audio->wflush)
+ rc = -EBUSY;
+
+ mutex_unlock(&audio->write_lock);
+ mutex_lock(&audio->lock);
+ audio->drv_status &= ~ADRV_STATUS_FSYNC;
+ mutex_unlock(&audio->lock);
+
+ return rc;
+}
+
+/*
+ * fsync for the synchronous (double-buffered) write path: wait for
+ * both DMA buffers to drain, push out any reserved trailing byte,
+ * then wait for the DSP end-of-stream (teos) signal.  Aborted with
+ * -EBUSY if a flush occurs, or early if the wait is interrupted.
+ */
+int audlpa_sync_fsync(struct audio *audio)
+{
+ struct buffer *frame;
+ int rc = 0;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+
+ mutex_lock(&audio->write_lock);
+
+ /* wait for both ping-pong buffers to be consumed */
+ rc = wait_event_interruptible(audio->write_wait,
+ (!audio->out[0].used &&
+ !audio->out[1].used &&
+ audio->out_needed) || audio->wflush);
+
+ if (rc < 0)
+ goto done;
+ else if (audio->wflush) {
+ rc = -EBUSY;
+ goto done;
+ }
+
+ /* audio->reserved presumably holds an odd trailing byte left
+ * over from a previous write -- pad it to 2 bytes and send it
+ * (NOTE(review): confirm against the write path) */
+ if (audio->reserved) {
+ MM_DBG("send reserved byte\n");
+ frame = audio->out + audio->out_tail;
+ ((char *) frame->data)[0] = audio->rsv_byte;
+ ((char *) frame->data)[1] = 0;
+ frame->used = 2;
+ audpcm_async_send_data(audio, 0);
+
+ rc = wait_event_interruptible(audio->write_wait,
+ (!audio->out[0].used &&
+ !audio->out[1].used &&
+ audio->out_needed) || audio->wflush);
+
+ if (rc < 0)
+ goto done;
+ else if (audio->wflush) {
+ rc = -EBUSY;
+ goto done;
+ }
+ }
+
+ /* pcm dmamiss message is sent continously
+ * when decoder is starved so no race
+ * condition concern
+ */
+ audio->teos = 0;
+
+ rc = wait_event_interruptible(audio->write_wait,
+ audio->teos || audio->wflush);
+
+ if (audio->wflush)
+ rc = -EBUSY;
+
+done:
+ mutex_unlock(&audio->write_lock);
+ return rc;
+}
+
+/*
+ * VFS fsync hook: valid only while the decoder is running.  Always
+ * delegates to the async-path drain; the sync variant above is
+ * retained but not used by this hook.
+ */
+int audlpa_fsync(struct file *file, int datasync)
+{
+ struct audio *audio = file->private_data;
+
+ if (!audio->running)
+ return -EINVAL;
+
+ return audlpa_async_fsync(audio);
+}
+
+/*
+ * Release every PMEM region still registered with this instance:
+ * unlink it from pmem_region_queue, drop the pmem file reference and
+ * free the bookkeeping node.  Called from audio_release().
+ */
+static void audpcm_reset_pmem_region(struct audio *audio)
+{
+ struct audlpa_pmem_region *region;
+ struct list_head *ptr, *next;
+
+ list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
+ region = list_entry(ptr, struct audlpa_pmem_region, list);
+ list_del(&region->list);
+ put_pmem_file(region->file);
+ kfree(region);
+ }
+
+ return;
+}
+
+/*
+ * Release the device instance: stop the decoder, return remote ADSP
+ * resources if still held, drop queued buffers and PMEM regions,
+ * free the decoder slot and the contiguous output buffer, then free
+ * the instance itself.
+ */
+static int audio_release(struct inode *inode, struct file *file)
+{
+ struct audio *audio = file->private_data;
+
+ MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
+ mutex_lock(&audio->lock);
+ audio_disable(audio);
+ if (audio->rmt_resource_released == 0)
+ rmt_put_resource(audio);
+ audpcm_async_flush(audio);
+ audpcm_reset_pmem_region(audio);
+
+ msm_adsp_put(audio->audplay);
+ audpp_adec_free(audio->dec_id);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ unregister_early_suspend(&audio->suspend_ctl.node);
+#endif
+ audio->opened = 0;
+ /* unblock any sleeping AUDIO_GET_EVENT reader before tearing the
+ * event queue down */
+ audio->event_abort = 1;
+ wake_up(&audio->event_wait);
+ audpcm_reset_event_queue(audio);
+ MM_DBG("pmem area = 0x%8x\n", (unsigned int)audio->data);
+ if (audio->data) {
+ msm_subsystem_unmap_buffer(audio->map_v_write);
+ free_contiguous_memory_by_paddr(audio->phys);
+ }
+ mutex_unlock(&audio->lock);
+#ifdef CONFIG_DEBUG_FS
+ if (audio->dentry)
+ debugfs_remove(audio->dentry);
+#endif
+ kfree(audio);
+ return 0;
+}
+
+/*
+ * Queue an event for delivery to a waiting AUDIO_GET_EVENT reader.
+ * Reuses a node from the preallocated free list when possible and
+ * otherwise allocates with GFP_ATOMIC (we hold event_queue_lock, a
+ * spinlock, across the allocation); the event is dropped if both
+ * fail.
+ */
+static void audpcm_post_event(struct audio *audio, int type,
+ union msm_audio_event_payload payload)
+{
+ struct audpcm_event *e_node = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&audio->event_queue_lock, flags);
+
+ if (!list_empty(&audio->free_event_queue)) {
+ e_node = list_first_entry(&audio->free_event_queue,
+ struct audpcm_event, list);
+ list_del(&e_node->list);
+ } else {
+ e_node = kmalloc(sizeof(struct audpcm_event), GFP_ATOMIC);
+ if (!e_node) {
+ MM_ERR("No mem to post event %d\n", type);
+ spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+ return;
+ }
+ }
+
+ e_node->event_type = type;
+ e_node->payload = payload;
+
+ list_add_tail(&e_node->list, &audio->event_queue);
+ spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+ wake_up(&audio->event_wait);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/* Early-suspend callback: post AUDIO_EVENT_SUSPEND to userspace.
+ * NOTE(review): payload is passed uninitialized -- presumably the
+ * suspend event carries no payload; confirm on the reader side. */
+static void audpcm_suspend(struct early_suspend *h)
+{
+ struct audpcm_suspend_ctl *ctl =
+ container_of(h, struct audpcm_suspend_ctl, node);
+ union msm_audio_event_payload payload;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ audpcm_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
+}
+
+/* Late-resume callback: post AUDIO_EVENT_RESUME to userspace.
+ * NOTE(review): payload is passed uninitialized -- presumably the
+ * resume event carries no payload; confirm on the reader side. */
+static void audpcm_resume(struct early_suspend *h)
+{
+ struct audpcm_suspend_ctl *ctl =
+ container_of(h, struct audpcm_suspend_ctl, node);
+ union msm_audio_event_payload payload;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ audpcm_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * debugfs open: stash the audio instance (i_private) for reads.
+ * Returns int to match the file_operations.open prototype (the
+ * original declared it ssize_t, which mismatches the .open slot).
+ */
+static int audpcm_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * debugfs read: format the instance state into a single static
+ * buffer and copy the requested window out.  The first group of
+ * fields is snapshotted under audio->lock; the rest are read
+ * lock-free by design (see comment below).
+ * NOTE(review): the static buffer is shared across concurrent
+ * readers of different instances -- acceptable for a debug hook.
+ */
+static ssize_t audpcm_debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const int debug_bufmax = 4096;
+ static char buffer[4096];
+ int n = 0;
+ struct audio *audio = file->private_data;
+
+ mutex_lock(&audio->lock);
+ n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "enabled %d\n", audio->enabled);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "stopped %d\n", audio->stopped);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "out_buf_sz %d\n", audio->out[0].size);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "volume %lx\n", audio->volume);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "sample rate %d\n", audio->out_sample_rate);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "channel mode %d\n", audio->out_channel_mode);
+ mutex_unlock(&audio->lock);
+ /* Following variables are only useful for debugging when
+ * when playback halts unexpectedly. Thus, no mutual exclusion
+ * enforced
+ */
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "wflush %d\n", audio->wflush);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "running %d\n", audio->running);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "dec state %d\n", audio->dec_state);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "out_needed %d\n", audio->out_needed);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "out_head %d\n", audio->out_head);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "out_tail %d\n", audio->out_tail);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "out[0].used %d\n", audio->out[0].used);
+ n += scnprintf(buffer + n, debug_bufmax - n,
+ "out[1].used %d\n", audio->out[1].used);
+ /* scnprintf keeps n < debug_bufmax, so this stays in bounds */
+ buffer[n] = 0;
+ return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+
+/* debugfs hooks: read-only dump of the instance state */
+static const struct file_operations audpcm_debug_fops = {
+ .read = audpcm_debug_read,
+ .open = audpcm_debug_open,
+};
+#endif
+
+/*
+ * open(): allocate and initialise a decoder instance.
+ *
+ * Acquires, in order: a PCM decoder slot, the audmgr session, the
+ * audplay ADSP module and the remote ADSP resources; each failure
+ * path releases everything taken before it.  Non-tunnelled (read)
+ * mode is not supported.
+ */
+static int audio_open(struct inode *inode, struct file *file)
+{
+ struct audio *audio = NULL;
+ int rc, i, dec_attrb, decid;
+ struct audpcm_event *e_node = NULL;
+
+#ifdef CONFIG_DEBUG_FS
+ /* "msm_pcm_lp_dec_" + 4 hex digits of decoder id + terminator;
+ * previously sized from "msm_lpa_" which truncated the name */
+ char name[sizeof "msm_pcm_lp_dec_" + 5];
+#endif
+
+ /* Allocate audio instance, set to zero */
+ audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
+ if (!audio) {
+ MM_ERR("no memory to allocate audio instance\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ MM_DBG("audio instance 0x%08x created\n", (int)audio);
+
+ /* Allocate the decoder */
+ dec_attrb = AUDDEC_DEC_PCM;
+ if (file->f_mode & FMODE_READ) {
+ MM_ERR("Non-Tunneled mode not supported\n");
+ rc = -EPERM;
+ kfree(audio);
+ goto done;
+ } else
+ dec_attrb |= MSM_AUD_MODE_TUNNEL;
+
+ decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
+ &audio->queue_id);
+ if (decid < 0) {
+ MM_ERR("No free decoder available\n");
+ rc = -ENODEV;
+ MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
+ kfree(audio);
+ goto done;
+ }
+ audio->dec_id = decid & MSM_AUD_DECODER_MASK;
+
+ audio->buffer_size = BUFSZ;
+ audio->buffer_count = MAX_BUF;
+ rc = audmgr_open(&audio->audmgr);
+ if (rc)
+ goto err;
+
+ rc = msm_adsp_get(audio->module_name, &audio->audplay,
+ &audlpadec_adsp_ops, audio);
+ if (rc) {
+ MM_ERR("failed to get %s module\n", audio->module_name);
+ audmgr_close(&audio->audmgr);
+ goto err;
+ }
+
+ rc = rmt_get_resource(audio);
+ if (rc) {
+ MM_ERR("ADSP resources are not available for PCM session");
+ audmgr_close(&audio->audmgr);
+ msm_adsp_put(audio->audplay);
+ goto err;
+ }
+
+ /* Initialize all locks of audio instance */
+ mutex_init(&audio->lock);
+ mutex_init(&audio->write_lock);
+ mutex_init(&audio->get_event_lock);
+ spin_lock_init(&audio->dsp_lock);
+ init_waitqueue_head(&audio->write_wait);
+ INIT_LIST_HEAD(&audio->out_queue);
+ INIT_LIST_HEAD(&audio->pmem_region_queue);
+ INIT_LIST_HEAD(&audio->free_event_queue);
+ INIT_LIST_HEAD(&audio->event_queue);
+ init_waitqueue_head(&audio->wait);
+ init_waitqueue_head(&audio->event_wait);
+ spin_lock_init(&audio->event_queue_lock);
+
+ /* sane playback defaults until AUDIO_SET_CONFIG overrides them */
+ audio->out_sample_rate = 44100;
+ audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
+ audio->out_bits = AUDPP_CMD_WAV_PCM_WIDTH_16;
+ audio->volume = 0x2000;
+ audpcm_async_flush(audio);
+
+ file->private_data = audio;
+ audio->opened = 1;
+
+#ifdef CONFIG_DEBUG_FS
+ snprintf(name, sizeof name, "msm_pcm_lp_dec_%04x", audio->dec_id);
+ audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ NULL, (void *) audio, &audpcm_debug_fops);
+
+ /* NOTE(review): debugfs_create_file may return NULL (not an
+ * ERR_PTR) on failure in this kernel generation, which IS_ERR
+ * would miss -- confirm against this tree's debugfs */
+ if (IS_ERR(audio->dentry))
+ MM_DBG("debugfs_create_file failed\n");
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+ audio->suspend_ctl.node.resume = audpcm_resume;
+ audio->suspend_ctl.node.suspend = audpcm_suspend;
+ audio->suspend_ctl.audio = audio;
+ register_early_suspend(&audio->suspend_ctl.node);
+#endif
+ /* preallocate event nodes so events can be posted from atomic
+ * context without allocating */
+ for (i = 0; i < AUDPCM_EVENT_NUM; i++) {
+ e_node = kmalloc(sizeof(struct audpcm_event), GFP_KERNEL);
+ if (e_node)
+ list_add_tail(&e_node->list, &audio->free_event_queue);
+ else {
+ MM_ERR("event pkt alloc failed\n");
+ break;
+ }
+ }
+done:
+ return rc;
+err:
+ audpp_adec_free(audio->dec_id);
+ MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
+ kfree(audio);
+ return rc;
+}
+
+/* Character-device entry points for the LPA PCM decoder node */
+static const struct file_operations audio_pcm_lp_fops = {
+ .owner = THIS_MODULE,
+ .open = audio_open,
+ .release = audio_release,
+ .unlocked_ioctl = audio_ioctl,
+ .fsync = audlpa_fsync,
+};
+
+/* Misc device /dev/msm_pcm_lp_dec; minor assigned dynamically */
+struct miscdevice audio_lpa_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "msm_pcm_lp_dec",
+ .fops = &audio_pcm_lp_fops,
+};
+
+/* Register the misc device at boot */
+static int __init audio_init(void)
+{
+ return misc_register(&audio_lpa_misc);
+}
+
+device_initcall(audio_init);
diff --git a/arch/arm/mach-msm/qdss-etb.c b/arch/arm/mach-msm/qdss-etb.c
index 39b7670..252352c 100644
--- a/arch/arm/mach-msm/qdss-etb.c
+++ b/arch/arm/mach-msm/qdss-etb.c
@@ -69,9 +69,11 @@
void __iomem *base;
bool enabled;
bool reading;
- struct mutex lock;
+ struct mutex mutex;
atomic_t in_use;
struct device *dev;
+ struct kobject *kobj;
+ uint32_t trigger_cntr;
};
static struct etb_ctx etb;
@@ -89,6 +91,7 @@
etb_writel(etb, 0x0, ETB_RAM_WRITE_POINTER);
etb_writel(etb, 0x0, ETB_RAM_READ_POINTER);
+ etb_writel(etb, etb.trigger_cntr, ETB_TRG);
etb_writel(etb, BIT(13) | BIT(0), ETB_FFCR);
etb_writel(etb, BIT(0), ETB_CTL_REG);
@@ -97,37 +100,48 @@
void etb_enable(void)
{
- mutex_lock(&etb.lock);
+ mutex_lock(&etb.mutex);
__etb_enable();
etb.enabled = true;
- dev_info(etb.dev, "etb enabled\n");
- mutex_unlock(&etb.lock);
+ dev_info(etb.dev, "ETB enabled\n");
+ mutex_unlock(&etb.mutex);
}
static void __etb_disable(void)
{
int count;
+ uint32_t ffcr;
ETB_UNLOCK();
- etb_writel(etb, BIT(12) | BIT(13), ETB_FFCR);
+ ffcr = etb_readl(etb, ETB_FFCR);
+ ffcr |= (BIT(12) | BIT(6));
+ etb_writel(etb, ffcr, ETB_FFCR);
+
+ for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFCR), 6) != 0
+ && count > 0; count--)
+ udelay(1);
+ WARN(count == 0, "timeout while flushing ETB, ETB_FFCR: %#x\n",
+ etb_readl(etb, ETB_FFCR));
+
etb_writel(etb, 0x0, ETB_CTL_REG);
for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFSR), 1) != 1
&& count > 0; count--)
udelay(1);
- WARN(count == 0, "timeout while disabling etb\n");
+ WARN(count == 0, "timeout while disabling ETB, ETB_FFSR: %#x\n",
+ etb_readl(etb, ETB_FFSR));
ETB_LOCK();
}
void etb_disable(void)
{
- mutex_lock(&etb.lock);
+ mutex_lock(&etb.mutex);
__etb_disable();
etb.enabled = false;
- dev_info(etb.dev, "etb disabled\n");
- mutex_unlock(&etb.lock);
+ dev_info(etb.dev, "ETB disabled\n");
+ mutex_unlock(&etb.mutex);
}
static void __etb_dump(void)
@@ -186,15 +200,15 @@
void etb_dump(void)
{
- mutex_lock(&etb.lock);
+ mutex_lock(&etb.mutex);
if (etb.enabled) {
__etb_disable();
__etb_dump();
__etb_enable();
- dev_info(etb.dev, "etb dumped\n");
+ dev_info(etb.dev, "ETB dumped\n");
}
- mutex_unlock(&etb.lock);
+ mutex_unlock(&etb.mutex);
}
static int etb_open(struct inode *inode, struct file *file)
@@ -254,6 +268,62 @@
.fops = &etb_fops,
};
+#define ETB_ATTR(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+
+/* sysfs write: parse a hex value into etb.trigger_cntr; it is
+ * programmed into ETB_TRG the next time the ETB is enabled */
+static ssize_t trigger_cntr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ etb.trigger_cntr = val;
+ return n;
+}
+/* sysfs read: report the cached trigger counter in hex */
+static ssize_t trigger_cntr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val = etb.trigger_cntr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETB_ATTR(trigger_cntr);
+
+/*
+ * Create the "etb" kobject under the QDSS module kobject and attach
+ * the trigger_cntr attribute.  On partial failure the kobject
+ * reference is dropped before returning the error.
+ */
+static int __init etb_sysfs_init(void)
+{
+ int ret;
+
+ etb.kobj = kobject_create_and_add("etb", qdss_get_modulekobj());
+ if (!etb.kobj) {
+ dev_err(etb.dev, "failed to create ETB sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(etb.kobj, &trigger_cntr_attr.attr);
+ if (ret) {
+ dev_err(etb.dev, "failed to create ETB sysfs trigger_cntr"
+ " attribute\n");
+ goto err_file;
+ }
+
+ return 0;
+err_file:
+ kobject_put(etb.kobj);
+err_create:
+ return ret;
+}
+
+/* Remove the trigger_cntr attribute and drop the etb kobject */
+static void etb_sysfs_exit(void)
+{
+ sysfs_remove_file(etb.kobj, &trigger_cntr_attr.attr);
+ kobject_put(etb.kobj);
+}
+
static int __devinit etb_probe(struct platform_device *pdev)
{
int ret;
@@ -273,6 +343,8 @@
etb.dev = &pdev->dev;
+ mutex_init(&etb.mutex);
+
ret = misc_register(&etb_misc);
if (ret)
goto err_misc;
@@ -283,16 +355,19 @@
goto err_alloc;
}
- mutex_init(&etb.lock);
+ etb_sysfs_init();
+ dev_info(etb.dev, "ETB initialized\n");
return 0;
err_alloc:
misc_deregister(&etb_misc);
err_misc:
+ mutex_destroy(&etb.mutex);
iounmap(etb.base);
err_ioremap:
err_res:
+ dev_err(etb.dev, "ETB init failed\n");
return ret;
}
@@ -300,9 +375,10 @@
{
if (etb.enabled)
etb_disable();
- mutex_destroy(&etb.lock);
+ etb_sysfs_exit();
kfree(etb.buf);
misc_deregister(&etb_misc);
+ mutex_destroy(&etb.mutex);
iounmap(etb.base);
return 0;
diff --git a/arch/arm/mach-msm/qdss-etm.c b/arch/arm/mach-msm/qdss-etm.c
index c0dc58e..4bc3f495 100644
--- a/arch/arm/mach-msm/qdss-etm.c
+++ b/arch/arm/mach-msm/qdss-etm.c
@@ -19,21 +19,22 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/wakelock.h>
#include <linux/pm_qos_params.h>
-#include <asm/atomic.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <asm/sections.h>
+#include <mach/socinfo.h>
#include "qdss.h"
-#define ptm_writel(ptm, cpu, val, off) \
- __raw_writel((val), ptm.base + (SZ_4K * cpu) + off)
-#define ptm_readl(ptm, cpu, off) \
- __raw_readl(ptm.base + (SZ_4K * cpu) + off)
+#define etm_writel(etm, cpu, val, off) \
+ __raw_writel((val), etm.base + (SZ_4K * cpu) + off)
+#define etm_readl(etm, cpu, off) \
+ __raw_readl(etm.base + (SZ_4K * cpu) + off)
/*
* Device registers:
@@ -99,92 +100,120 @@
#define ETMPDCR (0x310)
#define ETMPDSR (0x314)
-#define PTM_LOCK(cpu) \
+#define ETM_MAX_ADDR_CMP (16)
+#define ETM_MAX_CNTR (4)
+#define ETM_MAX_CTXID_CMP (3)
+
+#define ETM_MODE_EXCLUDE BIT(0)
+#define ETM_MODE_CYCACC BIT(1)
+#define ETM_MODE_STALL BIT(2)
+#define ETM_MODE_TIMESTAMP BIT(3)
+#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_ALL (0x1F)
+
+#define ETM_EVENT_MASK (0x1FFFF)
+#define ETM_SYNC_MASK (0xFFF)
+#define ETM_ALL_MASK (0xFFFFFFFF)
+
+#define ETM_SEQ_STATE_MAX_VAL (0x2)
+
+enum {
+ ETM_ADDR_TYPE_NONE,
+ ETM_ADDR_TYPE_SINGLE,
+ ETM_ADDR_TYPE_RANGE,
+ ETM_ADDR_TYPE_START,
+ ETM_ADDR_TYPE_STOP,
+};
+
+#define ETM_LOCK(cpu) \
do { \
mb(); \
- ptm_writel(ptm, cpu, 0x0, CS_LAR); \
+ etm_writel(etm, cpu, 0x0, CS_LAR); \
} while (0)
-#define PTM_UNLOCK(cpu) \
+#define ETM_UNLOCK(cpu) \
do { \
- ptm_writel(ptm, cpu, CS_UNLOCK_MAGIC, CS_LAR); \
+ etm_writel(etm, cpu, CS_UNLOCK_MAGIC, CS_LAR); \
mb(); \
} while (0)
-/* Forward declarations */
-static void ptm_cfg_rw_init(void);
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "qdss."
#ifdef CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE
-static int trace_on_boot = 1;
+static int etm_boot_enable = 1;
#else
-static int trace_on_boot;
+static int etm_boot_enable;
#endif
module_param_named(
- trace_on_boot, trace_on_boot, int, S_IRUGO
+ etm_boot_enable, etm_boot_enable, int, S_IRUGO
);
-struct ptm_config {
- /* read only config registers */
- uint32_t config_code;
- /* derived values */
- uint8_t nr_addr_comp;
- uint8_t nr_cntr;
- uint8_t nr_ext_input;
- uint8_t nr_ext_output;
- uint8_t nr_context_id_comp;
-
- uint32_t config_code_extn;
- /* derived values */
- uint8_t nr_extnd_ext_input_sel;
- uint8_t nr_instr_resources;
-
- uint32_t system_config;
- /* derived values */
- uint8_t fifofull_supported;
- uint8_t nr_procs_supported;
-
- /* read-write registers */
- uint32_t main_control;
- uint32_t trigger_event;
- uint32_t te_start_stop_control;
- uint32_t te_event;
- uint32_t te_control;
- uint32_t fifofull_level;
- uint32_t addr_comp_value[16];
- uint32_t addr_comp_access_type[16];
- uint32_t cntr_reload_value[4];
- uint32_t cntr_enable_event[4];
- uint32_t cntr_reload_event[4];
- uint32_t cntr_value[4];
- uint32_t seq_state_12_event;
- uint32_t seq_state_21_event;
- uint32_t seq_state_23_event;
- uint32_t seq_state_32_event;
- uint32_t seq_state_13_event;
- uint32_t seq_state_31_event;
- uint32_t current_seq_state;
- uint32_t ext_output_event[4];
- uint32_t context_id_comp_value[3];
- uint32_t context_id_comp_mask;
- uint32_t sync_freq;
- uint32_t extnd_ext_input_sel;
- uint32_t ts_event;
- uint32_t aux_control;
- uint32_t coresight_trace_id;
- uint32_t vmid_comp_value;
-};
-
-struct ptm_ctx {
- struct ptm_config cfg;
+struct etm_ctx {
void __iomem *base;
- bool trace_enabled;
+ bool enabled;
struct wake_lock wake_lock;
struct pm_qos_request_list qos_req;
- atomic_t in_use;
+ struct mutex mutex;
struct device *dev;
+ struct kobject *kobj;
+ uint8_t arch;
+ uint8_t nr_addr_cmp;
+ uint8_t nr_cntr;
+ uint8_t nr_ext_inp;
+ uint8_t nr_ext_out;
+ uint8_t nr_ctxid_cmp;
+ uint8_t reset;
+ uint32_t mode;
+ uint32_t ctrl;
+ uint32_t trigger_event;
+ uint32_t startstop_ctrl;
+ uint32_t enable_event;
+ uint32_t enable_ctrl1;
+ uint32_t fifofull_level;
+ uint8_t addr_idx;
+ uint32_t addr_val[ETM_MAX_ADDR_CMP];
+ uint32_t addr_acctype[ETM_MAX_ADDR_CMP];
+ uint32_t addr_type[ETM_MAX_ADDR_CMP];
+ uint8_t cntr_idx;
+ uint32_t cntr_rld_val[ETM_MAX_CNTR];
+ uint32_t cntr_event[ETM_MAX_CNTR];
+ uint32_t cntr_rld_event[ETM_MAX_CNTR];
+ uint32_t cntr_val[ETM_MAX_CNTR];
+ uint32_t seq_12_event;
+ uint32_t seq_21_event;
+ uint32_t seq_23_event;
+ uint32_t seq_31_event;
+ uint32_t seq_32_event;
+ uint32_t seq_13_event;
+ uint32_t seq_curr_state;
+ uint8_t ctxid_idx;
+ uint32_t ctxid_val[ETM_MAX_CTXID_CMP];
+ uint32_t ctxid_mask;
+ uint32_t sync_freq;
+ uint32_t timestamp_event;
};
-static struct ptm_ctx ptm;
+static struct etm_ctx etm = {
+ .trigger_event = 0x406F,
+ .enable_event = 0x6F,
+ .enable_ctrl1 = 0x1,
+ .fifofull_level = 0x28,
+ .addr_val = {(uint32_t) _stext, (uint32_t) _etext},
+ .addr_type = {ETM_ADDR_TYPE_RANGE, ETM_ADDR_TYPE_RANGE},
+ .cntr_event = {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
+ .cntr_rld_event = {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
+ .seq_12_event = 0x406F,
+ .seq_21_event = 0x406F,
+ .seq_23_event = 0x406F,
+ .seq_31_event = 0x406F,
+ .seq_32_event = 0x406F,
+ .seq_13_event = 0x406F,
+ .sync_freq = 0x80,
+ .timestamp_event = 0x406F,
+};
/* ETM clock is derived from the processor clock and gets enabled on a
@@ -202,117 +231,119 @@
* clock vote in the driver and the save-restore code uses 1. above
* for its vote
*/
-static void ptm_set_powerdown(int cpu)
+static void etm_set_pwrdwn(int cpu)
{
uint32_t etmcr;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr |= BIT(0);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
}
-static void ptm_clear_powerdown(int cpu)
+static void etm_clr_pwrdwn(int cpu)
{
uint32_t etmcr;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr &= ~BIT(0);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
}
-static void ptm_set_prog(int cpu)
+static void etm_set_prog(int cpu)
{
uint32_t etmcr;
int count;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr |= BIT(10);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
- for (count = TIMEOUT_US; BVAL(ptm_readl(ptm, cpu, ETMSR), 1) != 1
+ for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 1
&& count > 0; count--)
udelay(1);
- WARN(count == 0, "timeout while setting prog bit\n");
+ WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
+ etm_readl(etm, cpu, ETMSR));
}
-static void ptm_clear_prog(int cpu)
+static void etm_clr_prog(int cpu)
{
uint32_t etmcr;
int count;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr &= ~BIT(10);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
- for (count = TIMEOUT_US; BVAL(ptm_readl(ptm, cpu, ETMSR), 1) != 0
+ for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 0
&& count > 0; count--)
udelay(1);
- WARN(count == 0, "timeout while clearing prog bit\n");
+ WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
+ etm_readl(etm, cpu, ETMSR));
}
-static void __ptm_trace_enable(int cpu)
+static void __etm_enable(int cpu)
{
int i;
- PTM_UNLOCK(cpu);
+ ETM_UNLOCK(cpu);
/* Vote for ETM power/clock enable */
- ptm_clear_powerdown(cpu);
- ptm_set_prog(cpu);
+ etm_clr_pwrdwn(cpu);
+ etm_set_prog(cpu);
- ptm_writel(ptm, cpu, ptm.cfg.main_control | BIT(10), ETMCR);
- ptm_writel(ptm, cpu, ptm.cfg.trigger_event, ETMTRIGGER);
- ptm_writel(ptm, cpu, ptm.cfg.te_start_stop_control, ETMTSSCR);
- ptm_writel(ptm, cpu, ptm.cfg.te_event, ETMTEEVR);
- ptm_writel(ptm, cpu, ptm.cfg.te_control, ETMTECR1);
- ptm_writel(ptm, cpu, ptm.cfg.fifofull_level, ETMFFLR);
- for (i = 0; i < ptm.cfg.nr_addr_comp; i++) {
- ptm_writel(ptm, cpu, ptm.cfg.addr_comp_value[i], ETMACVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.addr_comp_access_type[i],
- ETMACTRn(i));
+ etm_writel(etm, cpu, etm.ctrl | BIT(10), ETMCR);
+ etm_writel(etm, cpu, etm.trigger_event, ETMTRIGGER);
+ etm_writel(etm, cpu, etm.startstop_ctrl, ETMTSSCR);
+ etm_writel(etm, cpu, etm.enable_event, ETMTEEVR);
+ etm_writel(etm, cpu, etm.enable_ctrl1, ETMTECR1);
+ etm_writel(etm, cpu, etm.fifofull_level, ETMFFLR);
+ for (i = 0; i < etm.nr_addr_cmp; i++) {
+ etm_writel(etm, cpu, etm.addr_val[i], ETMACVRn(i));
+ etm_writel(etm, cpu, etm.addr_acctype[i], ETMACTRn(i));
}
- for (i = 0; i < ptm.cfg.nr_cntr; i++) {
- ptm_writel(ptm, cpu, ptm.cfg.cntr_reload_value[i],
- ETMCNTRLDVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.cntr_enable_event[i],
- ETMCNTENRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.cntr_reload_event[i],
- ETMCNTRLDEVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.cntr_value[i], ETMCNTVRn(i));
+ for (i = 0; i < etm.nr_cntr; i++) {
+ etm_writel(etm, cpu, etm.cntr_rld_val[i], ETMCNTRLDVRn(i));
+ etm_writel(etm, cpu, etm.cntr_event[i], ETMCNTENRn(i));
+ etm_writel(etm, cpu, etm.cntr_rld_event[i], ETMCNTRLDEVRn(i));
+ etm_writel(etm, cpu, etm.cntr_val[i], ETMCNTVRn(i));
}
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_12_event, ETMSQ12EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_21_event, ETMSQ21EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_23_event, ETMSQ23EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_32_event, ETMSQ32EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_13_event, ETMSQ13EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_31_event, ETMSQ31EVR);
- ptm_writel(ptm, cpu, ptm.cfg.current_seq_state, ETMSQR);
- for (i = 0; i < ptm.cfg.nr_ext_output; i++)
- ptm_writel(ptm, cpu, ptm.cfg.ext_output_event[i],
- ETMEXTOUTEVRn(i));
- for (i = 0; i < ptm.cfg.nr_context_id_comp; i++)
- ptm_writel(ptm, cpu, ptm.cfg.context_id_comp_value[i],
- ETMCIDCVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.context_id_comp_mask, ETMCIDCMR);
- ptm_writel(ptm, cpu, ptm.cfg.sync_freq, ETMSYNCFR);
- ptm_writel(ptm, cpu, ptm.cfg.extnd_ext_input_sel, ETMEXTINSELR);
- ptm_writel(ptm, cpu, ptm.cfg.ts_event, ETMTSEVR);
- ptm_writel(ptm, cpu, ptm.cfg.aux_control, ETMAUXCR);
- ptm_writel(ptm, cpu, cpu+1, ETMTRACEIDR);
- ptm_writel(ptm, cpu, ptm.cfg.vmid_comp_value, ETMVMIDCVR);
+ etm_writel(etm, cpu, etm.seq_12_event, ETMSQ12EVR);
+ etm_writel(etm, cpu, etm.seq_21_event, ETMSQ21EVR);
+ etm_writel(etm, cpu, etm.seq_23_event, ETMSQ23EVR);
+ etm_writel(etm, cpu, etm.seq_31_event, ETMSQ31EVR);
+ etm_writel(etm, cpu, etm.seq_32_event, ETMSQ32EVR);
+ etm_writel(etm, cpu, etm.seq_13_event, ETMSQ13EVR);
+ etm_writel(etm, cpu, etm.seq_curr_state, ETMSQR);
+ for (i = 0; i < etm.nr_ext_out; i++)
+ etm_writel(etm, cpu, 0x0000406F, ETMEXTOUTEVRn(i));
+ for (i = 0; i < etm.nr_ctxid_cmp; i++)
+ etm_writel(etm, cpu, etm.ctxid_val[i], ETMCIDCVRn(i));
+ etm_writel(etm, cpu, etm.ctxid_mask, ETMCIDCMR);
+ etm_writel(etm, cpu, etm.sync_freq, ETMSYNCFR);
+ etm_writel(etm, cpu, 0x00000000, ETMEXTINSELR);
+ etm_writel(etm, cpu, etm.timestamp_event, ETMTSEVR);
+ etm_writel(etm, cpu, 0x00000000, ETMAUXCR);
+ etm_writel(etm, cpu, cpu+1, ETMTRACEIDR);
+ etm_writel(etm, cpu, 0x00000000, ETMVMIDCVR);
- ptm_clear_prog(cpu);
- PTM_LOCK(cpu);
+ etm_clr_prog(cpu);
+ ETM_LOCK(cpu);
}
-static int ptm_trace_enable(void)
+static int etm_enable(void)
{
int ret, cpu;
+ if (etm.enabled) {
+ dev_err(etm.dev, "ETM tracing already enabled\n");
+ ret = -EPERM;
+ goto err;
+ }
+
ret = qdss_clk_enable();
if (ret)
- return ret;
+ goto err;
- wake_lock(&ptm.wake_lock);
+ wake_lock(&etm.wake_lock);
/* 1. causes all online cpus to come out of idle PC
* 2. prevents idle PC until save restore flag is enabled atomically
*
@@ -320,7 +351,7 @@
* operation and to ensure cores where trace is expected to be turned
* on are already hotplugged on
*/
- pm_qos_update_request(&ptm.qos_req, 0);
+ pm_qos_update_request(&etm.qos_req, 0);
etb_disable();
tpiu_disable();
@@ -328,34 +359,43 @@
etb_enable();
funnel_enable(0x0, 0x3);
for_each_online_cpu(cpu)
- __ptm_trace_enable(cpu);
+ __etm_enable(cpu);
- ptm.trace_enabled = true;
+ etm.enabled = true;
- pm_qos_update_request(&ptm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&ptm.wake_lock);
+ pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&etm.wake_lock);
+ dev_info(etm.dev, "ETM tracing enabled\n");
return 0;
+err:
+ return ret;
}
-static void __ptm_trace_disable(int cpu)
+static void __etm_disable(int cpu)
{
- PTM_UNLOCK(cpu);
- ptm_set_prog(cpu);
+ ETM_UNLOCK(cpu);
+ etm_set_prog(cpu);
/* program trace enable to low by using always false event */
- ptm_writel(ptm, cpu, 0x6F | BIT(14), ETMTEEVR);
+ etm_writel(etm, cpu, 0x6F | BIT(14), ETMTEEVR);
/* Vote for ETM power/clock disable */
- ptm_set_powerdown(cpu);
- PTM_LOCK(cpu);
+ etm_set_pwrdwn(cpu);
+ ETM_LOCK(cpu);
}
-static void ptm_trace_disable(void)
+static int etm_disable(void)
{
- int cpu;
+ int ret, cpu;
- wake_lock(&ptm.wake_lock);
+ if (!etm.enabled) {
+ dev_err(etm.dev, "ETM tracing already disabled\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ wake_lock(&etm.wake_lock);
/* 1. causes all online cpus to come out of idle PC
* 2. prevents idle PC until save restore flag is disabled atomically
*
@@ -363,219 +403,29 @@
* operation and to ensure cores where trace is expected to be turned
* off are already hotplugged on
*/
- pm_qos_update_request(&ptm.qos_req, 0);
+ pm_qos_update_request(&etm.qos_req, 0);
for_each_online_cpu(cpu)
- __ptm_trace_disable(cpu);
+ __etm_disable(cpu);
etb_dump();
etb_disable();
funnel_disable(0x0, 0x3);
- ptm.trace_enabled = false;
+ etm.enabled = false;
- pm_qos_update_request(&ptm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&ptm.wake_lock);
+ pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&etm.wake_lock);
qdss_clk_disable();
-}
-static int ptm_open(struct inode *inode, struct file *file)
-{
- if (atomic_cmpxchg(&ptm.in_use, 0, 1))
- return -EBUSY;
-
- dev_dbg(ptm.dev, "%s: successfully opened\n", __func__);
+ dev_info(etm.dev, "ETM tracing disabled\n");
return 0;
-}
-
-static void ptm_range_filter(char range, uint32_t reg1,
- uint32_t addr1, uint32_t reg2, uint32_t addr2)
-{
- ptm.cfg.addr_comp_value[reg1] = addr1;
- ptm.cfg.addr_comp_value[reg2] = addr2;
-
- ptm.cfg.te_control |= (1 << (reg1/2));
- if (range == 'i')
- ptm.cfg.te_control &= ~BIT(24);
- else if (range == 'e')
- ptm.cfg.te_control |= BIT(24);
-}
-
-static void ptm_start_stop_filter(char start_stop,
- uint32_t reg, uint32_t addr)
-{
- ptm.cfg.addr_comp_value[reg] = addr;
-
- if (start_stop == 's')
- ptm.cfg.te_start_stop_control |= (1 << reg);
- else if (start_stop == 't')
- ptm.cfg.te_start_stop_control |= (1 << (reg + 16));
-
- ptm.cfg.te_control |= BIT(25);
-}
-
-#define MAX_COMMAND_STRLEN 40
-static ssize_t ptm_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- char command[MAX_COMMAND_STRLEN];
- int str_len;
- unsigned long reg1, reg2;
- unsigned long addr1, addr2;
-
- str_len = strnlen_user(data, MAX_COMMAND_STRLEN);
- dev_dbg(ptm.dev, "string length: %d", str_len);
- if (str_len == 0 || str_len == (MAX_COMMAND_STRLEN+1)) {
- dev_err(ptm.dev, "error in str_len: %d", str_len);
- return -EFAULT;
- }
- /* includes the null character */
- if (copy_from_user(command, data, str_len)) {
- dev_err(ptm.dev, "error in copy_from_user: %d", str_len);
- return -EFAULT;
- }
-
- dev_dbg(ptm.dev, "input = %s", command);
-
- switch (command[0]) {
- case '0':
- if (ptm.trace_enabled) {
- ptm_trace_disable();
- dev_info(ptm.dev, "tracing disabled\n");
- } else
- dev_err(ptm.dev, "trace already disabled\n");
-
- break;
- case '1':
- if (!ptm.trace_enabled) {
- if (!ptm_trace_enable())
- dev_info(ptm.dev, "tracing enabled\n");
- else
- dev_err(ptm.dev, "error enabling trace\n");
- } else
- dev_err(ptm.dev, "trace already enabled\n");
- break;
- case 'f':
- switch (command[2]) {
- case 'i':
- switch (command[4]) {
- case 'i':
- if (sscanf(&command[6], "%lx:%lx:%lx:%lx\\0",
-					&reg1, &addr1, &reg2, &addr2) != 4)
- goto err_out;
- if (reg1 > 7 || reg2 > 7 || (reg1 % 2))
- goto err_out;
- ptm_range_filter('i',
- reg1, addr1, reg2, addr2);
- break;
- case 'e':
- if (sscanf(&command[6], "%lx:%lx:%lx:%lx\\0",
-					&reg1, &addr1, &reg2, &addr2) != 4)
- goto err_out;
- if (reg1 > 7 || reg2 > 7 || (reg1 % 2)
- || command[2] == 'd')
- goto err_out;
- ptm_range_filter('e',
- reg1, addr1, reg2, addr2);
- break;
- case 's':
- if (sscanf(&command[6], "%lx:%lx\\0",
-					&reg1, &addr1) != 2)
- goto err_out;
- if (reg1 > 7)
- goto err_out;
- ptm_start_stop_filter('s', reg1, addr1);
- break;
- case 't':
- if (sscanf(&command[6], "%lx:%lx\\0",
-					&reg1, &addr1) != 2)
- goto err_out;
- if (reg1 > 7)
- goto err_out;
- ptm_start_stop_filter('t', reg1, addr1);
- break;
- default:
- goto err_out;
- }
- break;
- case 'r':
- ptm_cfg_rw_init();
- break;
- default:
- goto err_out;
- }
- break;
- default:
- goto err_out;
- }
-
- return len;
-
-err_out:
- return -EFAULT;
-}
-
-static int ptm_release(struct inode *inode, struct file *file)
-{
- atomic_set(&ptm.in_use, 0);
- dev_dbg(ptm.dev, "%s: released\n", __func__);
- return 0;
-}
-
-static const struct file_operations ptm_fops = {
- .owner = THIS_MODULE,
- .open = ptm_open,
- .write = ptm_write,
- .release = ptm_release,
-};
-
-static struct miscdevice ptm_misc = {
- .name = "msm_ptm",
- .minor = MISC_DYNAMIC_MINOR,
- .fops = &ptm_fops,
-};
-
-static void ptm_cfg_rw_init(void)
-{
- int i;
-
- ptm.cfg.main_control = 0x00001000;
- ptm.cfg.trigger_event = 0x0000406F;
- ptm.cfg.te_start_stop_control = 0x00000000;
- ptm.cfg.te_event = 0x0000006F;
- ptm.cfg.te_control = 0x01000000;
- ptm.cfg.fifofull_level = 0x00000028;
- for (i = 0; i < ptm.cfg.nr_addr_comp; i++) {
- ptm.cfg.addr_comp_value[i] = 0x00000000;
- ptm.cfg.addr_comp_access_type[i] = 0x00000000;
- }
- for (i = 0; i < ptm.cfg.nr_cntr; i++) {
- ptm.cfg.cntr_reload_value[i] = 0x00000000;
- ptm.cfg.cntr_enable_event[i] = 0x0000406F;
- ptm.cfg.cntr_reload_event[i] = 0x0000406F;
- ptm.cfg.cntr_value[i] = 0x00000000;
- }
- ptm.cfg.seq_state_12_event = 0x0000406F;
- ptm.cfg.seq_state_21_event = 0x0000406F;
- ptm.cfg.seq_state_23_event = 0x0000406F;
- ptm.cfg.seq_state_32_event = 0x0000406F;
- ptm.cfg.seq_state_13_event = 0x0000406F;
- ptm.cfg.seq_state_31_event = 0x0000406F;
- ptm.cfg.current_seq_state = 0x00000000;
- for (i = 0; i < ptm.cfg.nr_ext_output; i++)
- ptm.cfg.ext_output_event[i] = 0x0000406F;
- for (i = 0; i < ptm.cfg.nr_context_id_comp; i++)
- ptm.cfg.context_id_comp_value[i] = 0x00000000;
- ptm.cfg.context_id_comp_mask = 0x00000000;
- ptm.cfg.sync_freq = 0x00000080;
- ptm.cfg.extnd_ext_input_sel = 0x00000000;
- ptm.cfg.ts_event = 0x0000406F;
- ptm.cfg.aux_control = 0x00000000;
- ptm.cfg.vmid_comp_value = 0x00000000;
+err:
+ return ret;
}
/* Memory mapped writes to clear os lock not supported */
-static void ptm_os_unlock(void *unused)
+static void etm_os_unlock(void *unused)
{
unsigned long value = 0x0;
@@ -583,42 +433,800 @@
asm("isb\n\t");
}
-static void ptm_cfg_ro_init(void)
-{
- /* use cpu 0 for setup */
- int cpu = 0;
-
- /* Unlock OS lock first to allow memory mapped reads and writes */
- ptm_os_unlock(NULL);
- smp_call_function(ptm_os_unlock, NULL, 1);
- PTM_UNLOCK(cpu);
- /* Vote for ETM power/clock enable */
- ptm_clear_powerdown(cpu);
- ptm_set_prog(cpu);
-
- /* find all capabilities */
- ptm.cfg.config_code = ptm_readl(ptm, cpu, ETMCCR);
- ptm.cfg.nr_addr_comp = BMVAL(ptm.cfg.config_code, 0, 3) * 2;
- ptm.cfg.nr_cntr = BMVAL(ptm.cfg.config_code, 13, 15);
- ptm.cfg.nr_ext_input = BMVAL(ptm.cfg.config_code, 17, 19);
- ptm.cfg.nr_ext_output = BMVAL(ptm.cfg.config_code, 20, 22);
- ptm.cfg.nr_context_id_comp = BMVAL(ptm.cfg.config_code, 24, 25);
-
- ptm.cfg.config_code_extn = ptm_readl(ptm, cpu, ETMCCER);
- ptm.cfg.nr_extnd_ext_input_sel =
- BMVAL(ptm.cfg.config_code_extn, 0, 2);
- ptm.cfg.nr_instr_resources = BMVAL(ptm.cfg.config_code_extn, 13, 15);
-
- ptm.cfg.system_config = ptm_readl(ptm, cpu, ETMSCR);
- ptm.cfg.fifofull_supported = BVAL(ptm.cfg.system_config, 8);
- ptm.cfg.nr_procs_supported = BMVAL(ptm.cfg.system_config, 12, 14);
-
- /* Vote for ETM power/clock disable */
- ptm_set_powerdown(cpu);
- PTM_LOCK(cpu);
+#define ETM_STORE(__name, mask) \
+static ssize_t __name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t n) \
+{ \
+ unsigned long val; \
+ \
+ if (sscanf(buf, "%lx", &val) != 1) \
+ return -EINVAL; \
+ \
+ etm.__name = val & mask; \
+ return n; \
}
-static int __devinit ptm_probe(struct platform_device *pdev)
+#define ETM_SHOW(__name) \
+static ssize_t __name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned long val = etm.__name; \
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); \
+}
+
+#define ETM_ATTR(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+#define ETM_ATTR_RO(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO, __name##_show, NULL)
+
+static ssize_t enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret = 0;
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ if (val)
+ ret = etm_enable();
+ else
+ ret = etm_disable();
+ mutex_unlock(&etm.mutex);
+
+ if (ret)
+ return ret;
+ return n;
+}
+ETM_SHOW(enabled);
+ETM_ATTR(enabled);
+
+ETM_SHOW(nr_addr_cmp);
+ETM_ATTR_RO(nr_addr_cmp);
+ETM_SHOW(nr_cntr);
+ETM_ATTR_RO(nr_cntr);
+ETM_SHOW(nr_ctxid_cmp);
+ETM_ATTR_RO(nr_ctxid_cmp);
+
+/* Reset to trace everything i.e. exclude nothing. */
+static ssize_t reset_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int i;
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ if (val) {
+ etm.mode = ETM_MODE_EXCLUDE;
+ etm.ctrl = 0x0;
+ if (cpu_is_krait_v1()) {
+ etm.mode |= ETM_MODE_CYCACC;
+ etm.ctrl |= BIT(12);
+ }
+ etm.trigger_event = 0x406F;
+ etm.startstop_ctrl = 0x0;
+ etm.enable_event = 0x6F;
+ etm.enable_ctrl1 = 0x1000000;
+ etm.fifofull_level = 0x28;
+ etm.addr_idx = 0x0;
+ for (i = 0; i < etm.nr_addr_cmp; i++) {
+ etm.addr_val[i] = 0x0;
+ etm.addr_acctype[i] = 0x0;
+ etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+ etm.cntr_idx = 0x0;
+ for (i = 0; i < etm.nr_cntr; i++) {
+ etm.cntr_rld_val[i] = 0x0;
+ etm.cntr_event[i] = 0x406F;
+ etm.cntr_rld_event[i] = 0x406F;
+ etm.cntr_val[i] = 0x0;
+ }
+ etm.seq_12_event = 0x406F;
+ etm.seq_21_event = 0x406F;
+ etm.seq_23_event = 0x406F;
+ etm.seq_31_event = 0x406F;
+ etm.seq_32_event = 0x406F;
+ etm.seq_13_event = 0x406F;
+ etm.seq_curr_state = 0x0;
+ etm.ctxid_idx = 0x0;
+ for (i = 0; i < etm.nr_ctxid_cmp; i++)
+ etm.ctxid_val[i] = 0x0;
+ etm.ctxid_mask = 0x0;
+ etm.sync_freq = 0x80;
+ etm.timestamp_event = 0x406F;
+ }
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(reset);
+ETM_ATTR(reset);
+
+static ssize_t mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.mode = val & ETM_MODE_ALL;
+
+ if (etm.mode & ETM_MODE_EXCLUDE)
+ etm.enable_ctrl1 |= BIT(24);
+ else
+ etm.enable_ctrl1 &= ~BIT(24);
+
+ if (etm.mode & ETM_MODE_CYCACC)
+ etm.ctrl |= BIT(12);
+ else
+ etm.ctrl &= ~BIT(12);
+
+ if (etm.mode & ETM_MODE_STALL)
+ etm.ctrl |= BIT(7);
+ else
+ etm.ctrl &= ~BIT(7);
+
+ if (etm.mode & ETM_MODE_TIMESTAMP)
+ etm.ctrl |= BIT(28);
+ else
+ etm.ctrl &= ~BIT(28);
+ if (etm.mode & ETM_MODE_CTXID)
+ etm.ctrl |= (BIT(14) | BIT(15));
+ else
+ etm.ctrl &= ~(BIT(14) | BIT(15));
+ mutex_unlock(&etm.mutex);
+
+ return n;
+}
+ETM_SHOW(mode);
+ETM_ATTR(mode);
+
+ETM_STORE(trigger_event, ETM_EVENT_MASK);
+ETM_SHOW(trigger_event);
+ETM_ATTR(trigger_event);
+
+ETM_STORE(enable_event, ETM_EVENT_MASK);
+ETM_SHOW(enable_event);
+ETM_ATTR(enable_event);
+
+ETM_STORE(fifofull_level, ETM_ALL_MASK);
+ETM_SHOW(fifofull_level);
+ETM_ATTR(fifofull_level);
+
+static ssize_t addr_idx_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= etm.nr_addr_cmp)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&etm.mutex);
+ etm.addr_idx = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(addr_idx);
+ETM_ATTR(addr_idx);
+
+static ssize_t addr_single_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_single_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val = etm.addr_val[idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_single);
+
+static ssize_t addr_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val1, val2;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (idx % 2 != 0) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+ if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val1;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ etm.addr_val[idx + 1] = val2;
+ etm.addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ etm.enable_ctrl1 |= (1 << (idx/2));
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val1, val2;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (idx % 2 != 0) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+ if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val1 = etm.addr_val[idx];
+ val2 = etm.addr_val[idx + 1];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+ETM_ATTR(addr_range);
+
+static ssize_t addr_start_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_START;
+ etm.startstop_ctrl |= (1 << idx);
+ etm.enable_ctrl1 |= BIT(25);
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_start_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val = etm.addr_val[idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_start);
+
+static ssize_t addr_stop_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ etm.startstop_ctrl |= (1 << (idx + 16));
+ etm.enable_ctrl1 |= BIT(25);
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_stop_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val = etm.addr_val[idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_stop);
+
+static ssize_t addr_acctype_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.addr_acctype[etm.addr_idx] = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_acctype_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.addr_acctype[etm.addr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_acctype);
+
+static ssize_t cntr_idx_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= etm.nr_cntr)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&etm.mutex);
+ etm.cntr_idx = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(cntr_idx);
+ETM_ATTR(cntr_idx);
+
+static ssize_t cntr_rld_val_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_rld_val[etm.cntr_idx] = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_rld_val_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_rld_val[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_rld_val);
+
+static ssize_t cntr_event_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_event[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_event);
+
+static ssize_t cntr_rld_event_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_rld_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_rld_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_rld_event[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_rld_event);
+
+static ssize_t cntr_val_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_val[etm.cntr_idx] = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_val_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_val[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_val);
+
+ETM_STORE(seq_12_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_12_event);
+ETM_ATTR(seq_12_event);
+
+ETM_STORE(seq_21_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_21_event);
+ETM_ATTR(seq_21_event);
+
+ETM_STORE(seq_23_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_23_event);
+ETM_ATTR(seq_23_event);
+
+ETM_STORE(seq_31_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_31_event);
+ETM_ATTR(seq_31_event);
+
+ETM_STORE(seq_32_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_32_event);
+ETM_ATTR(seq_32_event);
+
+ETM_STORE(seq_13_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_13_event);
+ETM_ATTR(seq_13_event);
+
+static ssize_t seq_curr_state_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val > ETM_SEQ_STATE_MAX_VAL)
+ return -EINVAL;
+
+ etm.seq_curr_state = val;
+ return n;
+}
+ETM_SHOW(seq_curr_state);
+ETM_ATTR(seq_curr_state);
+
+static ssize_t ctxid_idx_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= etm.nr_ctxid_cmp)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&etm.mutex);
+ etm.ctxid_idx = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(ctxid_idx);
+ETM_ATTR(ctxid_idx);
+
+static ssize_t ctxid_val_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.ctxid_val[etm.ctxid_idx] = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t ctxid_val_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.ctxid_val[etm.ctxid_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(ctxid_val);
+
+ETM_STORE(ctxid_mask, ETM_ALL_MASK);
+ETM_SHOW(ctxid_mask);
+ETM_ATTR(ctxid_mask);
+
+ETM_STORE(sync_freq, ETM_SYNC_MASK);
+ETM_SHOW(sync_freq);
+ETM_ATTR(sync_freq);
+
+ETM_STORE(timestamp_event, ETM_EVENT_MASK);
+ETM_SHOW(timestamp_event);
+ETM_ATTR(timestamp_event);
+
+static struct attribute *etm_attrs[] = {
+ &nr_addr_cmp_attr.attr,
+ &nr_cntr_attr.attr,
+ &nr_ctxid_cmp_attr.attr,
+ &reset_attr.attr,
+ &mode_attr.attr,
+ &trigger_event_attr.attr,
+ &enable_event_attr.attr,
+ &fifofull_level_attr.attr,
+ &addr_idx_attr.attr,
+ &addr_single_attr.attr,
+ &addr_range_attr.attr,
+ &addr_start_attr.attr,
+ &addr_stop_attr.attr,
+ &addr_acctype_attr.attr,
+ &cntr_idx_attr.attr,
+ &cntr_rld_val_attr.attr,
+ &cntr_event_attr.attr,
+ &cntr_rld_event_attr.attr,
+ &cntr_val_attr.attr,
+ &seq_12_event_attr.attr,
+ &seq_21_event_attr.attr,
+ &seq_23_event_attr.attr,
+ &seq_31_event_attr.attr,
+ &seq_32_event_attr.attr,
+ &seq_13_event_attr.attr,
+ &seq_curr_state_attr.attr,
+ &ctxid_idx_attr.attr,
+ &ctxid_val_attr.attr,
+ &ctxid_mask_attr.attr,
+ &sync_freq_attr.attr,
+	&timestamp_event_attr.attr,
+ NULL,
+};
+
+static struct attribute_group etm_attr_grp = {
+ .attrs = etm_attrs,
+};
+
+static int __init etm_sysfs_init(void)
+{
+ int ret;
+
+ etm.kobj = kobject_create_and_add("etm", qdss_get_modulekobj());
+ if (!etm.kobj) {
+ dev_err(etm.dev, "failed to create ETM sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(etm.kobj, &enabled_attr.attr);
+ if (ret) {
+ dev_err(etm.dev, "failed to create ETM sysfs enabled"
+ " attribute\n");
+ goto err_file;
+ }
+
+ if (sysfs_create_group(etm.kobj, &etm_attr_grp))
+ dev_err(etm.dev, "failed to create ETM sysfs group\n");
+
+ return 0;
+err_file:
+ kobject_put(etm.kobj);
+err_create:
+ return ret;
+}
+
+static void etm_sysfs_exit(void)
+{
+ sysfs_remove_group(etm.kobj, &etm_attr_grp);
+ sysfs_remove_file(etm.kobj, &enabled_attr.attr);
+ kobject_put(etm.kobj);
+}
+
+static bool etm_arch_supported(uint8_t arch)
+{
+ switch (arch) {
+ case PFT_ARCH_V1_1:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static int __init etm_arch_init(void)
+{
+ int ret, i;
+ /* use cpu 0 for setup */
+ int cpu = 0;
+ uint32_t etmidr;
+ uint32_t etmccr;
+
+ /* Unlock OS lock first to allow memory mapped reads and writes */
+ etm_os_unlock(NULL);
+ smp_call_function(etm_os_unlock, NULL, 1);
+ ETM_UNLOCK(cpu);
+ /* Vote for ETM power/clock enable */
+ etm_clr_pwrdwn(cpu);
+ /* Set prog bit. It will be set from reset but this is included to
+ * ensure it is set
+ */
+ etm_set_prog(cpu);
+
+ /* find all capabilities */
+ etmidr = etm_readl(etm, cpu, ETMIDR);
+ etm.arch = BMVAL(etmidr, 4, 11);
+ if (etm_arch_supported(etm.arch) == false) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ etmccr = etm_readl(etm, cpu, ETMCCR);
+ etm.nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+ etm.nr_cntr = BMVAL(etmccr, 13, 15);
+ etm.nr_ext_inp = BMVAL(etmccr, 17, 19);
+ etm.nr_ext_out = BMVAL(etmccr, 20, 22);
+ etm.nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+ if (cpu_is_krait_v1()) {
+ /* Krait pass1 doesn't support include filtering and non-cycle
+ * accurate tracing
+ */
+ etm.mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
+ etm.ctrl = 0x1000;
+ etm.enable_ctrl1 = 0x1000000;
+ for (i = 0; i < etm.nr_addr_cmp; i++) {
+ etm.addr_val[i] = 0x0;
+ etm.addr_acctype[i] = 0x0;
+ etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+ }
+
+ /* Vote for ETM power/clock disable */
+ etm_set_pwrdwn(cpu);
+ ETM_LOCK(cpu);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int __devinit etm_probe(struct platform_device *pdev)
{
int ret;
struct resource *res;
@@ -629,80 +1237,82 @@
goto err_res;
}
- ptm.base = ioremap_nocache(res->start, resource_size(res));
- if (!ptm.base) {
+ etm.base = ioremap_nocache(res->start, resource_size(res));
+ if (!etm.base) {
ret = -EINVAL;
goto err_ioremap;
}
- ptm.dev = &pdev->dev;
+ etm.dev = &pdev->dev;
- ret = misc_register(&ptm_misc);
- if (ret)
- goto err_misc;
-
+ mutex_init(&etm.mutex);
+ wake_lock_init(&etm.wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
+ pm_qos_add_request(&etm.qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
ret = qdss_clk_enable();
if (ret)
goto err_clk;
- ptm_cfg_ro_init();
- ptm_cfg_rw_init();
+ ret = etm_arch_init();
+ if (ret)
+ goto err_arch;
- ptm.trace_enabled = false;
+ ret = etm_sysfs_init();
+ if (ret)
+ goto err_sysfs;
- wake_lock_init(&ptm.wake_lock, WAKE_LOCK_SUSPEND, "msm_ptm");
- pm_qos_add_request(&ptm.qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
- atomic_set(&ptm.in_use, 0);
+ etm.enabled = false;
qdss_clk_disable();
- dev_info(ptm.dev, "PTM intialized.\n");
+ dev_info(etm.dev, "ETM initialized\n");
- if (trace_on_boot) {
- if (!ptm_trace_enable())
- dev_info(ptm.dev, "tracing enabled\n");
- else
- dev_err(ptm.dev, "error enabling trace\n");
- }
+ if (etm_boot_enable)
+ etm_enable();
return 0;
+err_sysfs:
+err_arch:
+ qdss_clk_disable();
err_clk:
- misc_deregister(&ptm_misc);
-err_misc:
- iounmap(ptm.base);
+ pm_qos_remove_request(&etm.qos_req);
+ wake_lock_destroy(&etm.wake_lock);
+ mutex_destroy(&etm.mutex);
+ iounmap(etm.base);
err_ioremap:
err_res:
+ dev_err(etm.dev, "ETM init failed\n");
return ret;
}
-static int ptm_remove(struct platform_device *pdev)
+static int etm_remove(struct platform_device *pdev)
{
- if (ptm.trace_enabled)
- ptm_trace_disable();
- pm_qos_remove_request(&ptm.qos_req);
- wake_lock_destroy(&ptm.wake_lock);
- misc_deregister(&ptm_misc);
- iounmap(ptm.base);
+ if (etm.enabled)
+ etm_disable();
+ etm_sysfs_exit();
+ pm_qos_remove_request(&etm.qos_req);
+ wake_lock_destroy(&etm.wake_lock);
+ mutex_destroy(&etm.mutex);
+ iounmap(etm.base);
return 0;
}
-static struct platform_driver ptm_driver = {
- .probe = ptm_probe,
- .remove = ptm_remove,
+static struct platform_driver etm_driver = {
+ .probe = etm_probe,
+ .remove = etm_remove,
.driver = {
- .name = "msm_ptm",
+ .name = "msm_etm",
},
};
-int __init ptm_init(void)
+int __init etm_init(void)
{
- return platform_driver_register(&ptm_driver);
+ return platform_driver_register(&etm_driver);
}
-void ptm_exit(void)
+void etm_exit(void)
{
- platform_driver_unregister(&ptm_driver);
+ platform_driver_unregister(&etm_driver);
}
diff --git a/arch/arm/mach-msm/qdss-funnel.c b/arch/arm/mach-msm/qdss-funnel.c
index dd61c15..3eec560 100644
--- a/arch/arm/mach-msm/qdss-funnel.c
+++ b/arch/arm/mach-msm/qdss-funnel.c
@@ -26,12 +26,12 @@
#define funnel_readl(funnel, id, off) \
__raw_readl(funnel.base + (SZ_4K * id) + off)
-#define CS_TFUNNEL_FUNCTL (0x000)
-#define CS_TFUNNEL_PRICTL (0x004)
-#define CS_TFUNNEL_ITATBDATA0 (0xEEC)
-#define CS_TFUNNEL_ITATBCTR2 (0xEF0)
-#define CS_TFUNNEL_ITATBCTR1 (0xEF4)
-#define CS_TFUNNEL_ITATBCTR0 (0xEF8)
+#define FUNNEL_FUNCTL (0x000)
+#define FUNNEL_PRICTL (0x004)
+#define FUNNEL_ITATBDATA0 (0xEEC)
+#define FUNNEL_ITATBCTR2 (0xEF0)
+#define FUNNEL_ITATBCTR1 (0xEF4)
+#define FUNNEL_ITATBCTR0 (0xEF8)
#define FUNNEL_LOCK(id) \
@@ -45,18 +45,21 @@
mb(); \
} while (0)
-#define DEFAULT_HOLDTIME_MASK (0xF00)
-#define DEFAULT_HOLDTIME_SHFT (0x8)
-#define DEFAULT_HOLDTIME (0x7 << DEFAULT_HOLDTIME_SHFT)
-#define DEFAULT_PRIORITY (0xFAC680)
+#define FUNNEL_HOLDTIME_MASK (0xF00)
+#define FUNNEL_HOLDTIME_SHFT (0x8)
+#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
struct funnel_ctx {
void __iomem *base;
bool enabled;
struct device *dev;
+ struct kobject *kobj;
+ uint32_t priority;
};
-static struct funnel_ctx funnel;
+static struct funnel_ctx funnel = {
+ .priority = 0xFAC680,
+};
static void __funnel_enable(uint8_t id, uint32_t port_mask)
{
@@ -64,12 +67,12 @@
FUNNEL_UNLOCK(id);
- functl = funnel_readl(funnel, id, CS_TFUNNEL_FUNCTL);
- functl &= ~DEFAULT_HOLDTIME_MASK;
- functl |= DEFAULT_HOLDTIME;
+ functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
+ functl &= ~FUNNEL_HOLDTIME_MASK;
+ functl |= FUNNEL_HOLDTIME;
functl |= port_mask;
- funnel_writel(funnel, id, functl, CS_TFUNNEL_FUNCTL);
- funnel_writel(funnel, id, DEFAULT_PRIORITY, CS_TFUNNEL_PRICTL);
+ funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
+ funnel_writel(funnel, id, funnel.priority, FUNNEL_PRICTL);
FUNNEL_LOCK(id);
}
@@ -78,7 +81,7 @@
{
__funnel_enable(id, port_mask);
funnel.enabled = true;
- dev_info(funnel.dev, "funnel port mask 0x%lx enabled\n",
+ dev_info(funnel.dev, "FUNNEL port mask 0x%lx enabled\n",
(unsigned long) port_mask);
}
@@ -88,9 +91,9 @@
FUNNEL_UNLOCK(id);
- functl = funnel_readl(funnel, id, CS_TFUNNEL_FUNCTL);
+ functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
functl &= ~port_mask;
- funnel_writel(funnel, id, functl, CS_TFUNNEL_FUNCTL);
+ funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
FUNNEL_LOCK(id);
}
@@ -99,10 +102,66 @@
{
__funnel_disable(id, port_mask);
funnel.enabled = false;
- dev_info(funnel.dev, "funnel port mask 0x%lx disabled\n",
+ dev_info(funnel.dev, "FUNNEL port mask 0x%lx disabled\n",
(unsigned long) port_mask);
}
+#define FUNNEL_ATTR(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+
+static ssize_t priority_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ funnel.priority = val;
+ return n;
+}
+static ssize_t priority_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val = funnel.priority;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+FUNNEL_ATTR(priority);
+
+static int __init funnel_sysfs_init(void)
+{
+ int ret;
+
+ funnel.kobj = kobject_create_and_add("funnel", qdss_get_modulekobj());
+ if (!funnel.kobj) {
+ dev_err(funnel.dev, "failed to create FUNNEL sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(funnel.kobj, &priority_attr.attr);
+ if (ret) {
+ dev_err(funnel.dev, "failed to create FUNNEL sysfs priority"
+ " attribute\n");
+ goto err_file;
+ }
+
+ return 0;
+err_file:
+ kobject_put(funnel.kobj);
+err_create:
+ return ret;
+}
+
+static void funnel_sysfs_exit(void)
+{
+ sysfs_remove_file(funnel.kobj, &priority_attr.attr);
+ kobject_put(funnel.kobj);
+}
+
static int __devinit funnel_probe(struct platform_device *pdev)
{
int ret;
@@ -122,10 +181,14 @@
funnel.dev = &pdev->dev;
+ funnel_sysfs_init();
+
+ dev_info(funnel.dev, "FUNNEL initialized\n");
return 0;
err_ioremap:
err_res:
+ dev_err(funnel.dev, "FUNNEL init failed\n");
return ret;
}
@@ -133,6 +196,7 @@
{
if (funnel.enabled)
funnel_disable(0x0, 0xFF);
+ funnel_sysfs_exit();
iounmap(funnel.base);
return 0;
diff --git a/arch/arm/mach-msm/qdss-tpiu.c b/arch/arm/mach-msm/qdss-tpiu.c
index e4a61de..409bf2c 100644
--- a/arch/arm/mach-msm/qdss-tpiu.c
+++ b/arch/arm/mach-msm/qdss-tpiu.c
@@ -23,19 +23,19 @@
#define tpiu_writel(tpiu, val, off) __raw_writel((val), tpiu.base + off)
#define tpiu_readl(tpiu, off) __raw_readl(tpiu.base + off)
-#define TPIU_SUPPORTED_PORT_SIZE (0x000)
-#define TPIU_CURRENT_PORT_SIZE (0x004)
-#define TPIU_SUPPORTED_TRIGGER_MODES (0x100)
-#define TPIU_TRIGGER_COUNTER_VALUE (0x104)
-#define TPIU_TRIGGER_MULTIPLIER (0x108)
-#define TPIU_SUPPORTED_TEST_PATTERNM (0x200)
-#define TPIU_CURRENT_TEST_PATTERNM (0x204)
-#define TPIU_TEST_PATTERN_REPEAT_COUNTER (0x208)
-#define TPIU_FORMATTER_AND_FLUSH_STATUS (0x300)
-#define TPIU_FORMATTER_AND_FLUSH_CONTROL (0x304)
-#define TPIU_FORMATTER_SYNCHRONIZATION_COUNTER (0x308)
-#define TPIU_EXTCTL_IN_PORT (0x400)
-#define TPIU_EXTCTL_OUT_PORT (0x404)
+#define TPIU_SUPP_PORTSZ (0x000)
+#define TPIU_CURR_PORTSZ (0x004)
+#define TPIU_SUPP_TRIGMODES (0x100)
+#define TPIU_TRIG_CNTRVAL (0x104)
+#define TPIU_TRIG_MULT (0x108)
+#define TPIU_SUPP_TESTPATM (0x200)
+#define TPIU_CURR_TESTPATM (0x204)
+#define TPIU_TEST_PATREPCNTR (0x208)
+#define TPIU_FFSR (0x300)
+#define TPIU_FFCR (0x304)
+#define TPIU_FSYNC_CNTR (0x308)
+#define TPIU_EXTCTL_INPORT (0x400)
+#define TPIU_EXTCTL_OUTPORT (0x404)
#define TPIU_ITTRFLINACK (0xEE4)
#define TPIU_ITTRFLIN (0xEE8)
#define TPIU_ITATBDATA0 (0xEEC)
@@ -67,8 +67,8 @@
{
TPIU_UNLOCK();
- tpiu_writel(tpiu, 0x3000, TPIU_FORMATTER_AND_FLUSH_CONTROL);
- tpiu_writel(tpiu, 0x3040, TPIU_FORMATTER_AND_FLUSH_CONTROL);
+ tpiu_writel(tpiu, 0x3000, TPIU_FFCR);
+ tpiu_writel(tpiu, 0x3040, TPIU_FFCR);
TPIU_LOCK();
}
@@ -77,7 +77,7 @@
{
__tpiu_disable();
tpiu.enabled = false;
- dev_info(tpiu.dev, "tpiu disabled\n");
+ dev_info(tpiu.dev, "TPIU disabled\n");
}
static int __devinit tpiu_probe(struct platform_device *pdev)
@@ -99,10 +99,12 @@
tpiu.dev = &pdev->dev;
+ dev_info(tpiu.dev, "TPIU initialized\n");
return 0;
err_ioremap:
err_res:
+ dev_err(tpiu.dev, "TPIU init failed\n");
return ret;
}
diff --git a/arch/arm/mach-msm/qdss.c b/arch/arm/mach-msm/qdss.c
index 55d14cd..ab28c82 100644
--- a/arch/arm/mach-msm/qdss.c
+++ b/arch/arm/mach-msm/qdss.c
@@ -29,6 +29,18 @@
QDSS_CLK_ON_HSDBG,
};
+struct qdss_ctx {
+ struct kobject *modulekobj;
+ uint8_t max_clk;
+};
+
+static struct qdss_ctx qdss;
+
+
+struct kobject *qdss_get_modulekobj(void)
+{
+ return qdss.modulekobj;
+}
int qdss_clk_enable(void)
{
@@ -36,13 +48,15 @@
struct msm_rpm_iv_pair iv;
iv.id = MSM_RPM_ID_QDSS_CLK;
- iv.value = QDSS_CLK_ON_DBG;
+ if (qdss.max_clk)
+ iv.value = QDSS_CLK_ON_HSDBG;
+ else
+ iv.value = QDSS_CLK_ON_DBG;
ret = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &iv, 1);
if (WARN(ret, "qdss clks not enabled (%d)\n", ret))
goto err_clk;
return 0;
-
err_clk:
return ret;
}
@@ -58,10 +72,65 @@
WARN(ret, "qdss clks not disabled (%d)\n", ret);
}
+#define QDSS_ATTR(name) \
+static struct kobj_attribute name##_attr = \
+ __ATTR(name, S_IRUGO | S_IWUSR, name##_show, name##_store)
+
+static ssize_t max_clk_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ qdss.max_clk = val;
+ return n;
+}
+static ssize_t max_clk_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val = qdss.max_clk;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+QDSS_ATTR(max_clk);
+
+static int __init qdss_sysfs_init(void)
+{
+ int ret;
+
+ qdss.modulekobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!qdss.modulekobj) {
+ pr_err("failed to find QDSS sysfs module kobject\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ ret = sysfs_create_file(qdss.modulekobj, &max_clk_attr.attr);
+ if (ret) {
+ pr_err("failed to create QDSS sysfs max_clk attribute\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static void qdss_sysfs_exit(void)
+{
+ sysfs_remove_file(qdss.modulekobj, &max_clk_attr.attr);
+}
+
static int __init qdss_init(void)
{
int ret;
+ ret = qdss_sysfs_init();
+ if (ret)
+ goto err_sysfs;
ret = etb_init();
if (ret)
goto err_etb;
@@ -71,26 +140,30 @@
ret = funnel_init();
if (ret)
goto err_funnel;
- ret = ptm_init();
+ ret = etm_init();
if (ret)
- goto err_ptm;
+ goto err_etm;
+ pr_info("QDSS initialized\n");
return 0;
-
-err_ptm:
+err_etm:
funnel_exit();
err_funnel:
tpiu_exit();
err_tpiu:
etb_exit();
err_etb:
+ qdss_sysfs_exit();
+err_sysfs:
+ pr_err("QDSS init failed\n");
return ret;
}
module_init(qdss_init);
static void __exit qdss_exit(void)
{
- ptm_exit();
+ qdss_sysfs_exit();
+ etm_exit();
funnel_exit();
tpiu_exit();
etb_exit();
diff --git a/arch/arm/mach-msm/qdss.h b/arch/arm/mach-msm/qdss.h
index 199222a..fee0587 100644
--- a/arch/arm/mach-msm/qdss.h
+++ b/arch/arm/mach-msm/qdss.h
@@ -64,8 +64,8 @@
void tpiu_exit(void);
int funnel_init(void);
void funnel_exit(void);
-int ptm_init(void);
-void ptm_exit(void);
+int etm_init(void);
+void etm_exit(void);
void etb_enable(void);
void etb_disable(void);
@@ -73,6 +73,8 @@
void tpiu_disable(void);
void funnel_enable(uint8_t id, uint32_t port_mask);
void funnel_disable(uint8_t id, uint32_t port_mask);
+
+struct kobject *qdss_get_modulekobj(void);
int qdss_clk_enable(void);
void qdss_clk_disable(void);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 228c77fb..cd32152 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -50,7 +50,7 @@
#define EVENT_MASK_SIZE 1000
#define USER_SPACE_DATA 8000
#define PKT_SIZE 4096
-#define MAX_EQUIP_ID 12
+#define MAX_EQUIP_ID 15
#define DIAG_CTRL_MSG_LOG_MASK 9
#define DIAG_CTRL_MSG_EVENT_MASK 10
#define DIAG_CTRL_MSG_F3_MASK 11
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f16aa0c..4cf6d33 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -522,7 +522,7 @@
uint8_t *temp = buf;
int i = 0;
unsigned char *ptr_data;
- int offset = 8*MAX_EQUIP_ID;
+ int offset = (sizeof(struct mask_info))*MAX_EQUIP_ID;
struct mask_info *ptr = (struct mask_info *)driver->log_masks;
mutex_lock(&driver->diagchar_mutex);
@@ -661,9 +661,10 @@
void *buf = driver->buf_log_mask_update;
int header_size = sizeof(struct diag_ctrl_log_mask);
struct mask_info *ptr = (struct mask_info *)driver->log_masks;
- int i, size = (driver->log_mask->num_items+7)/8;
+ int i, size;
for (i = 0; i < MAX_EQUIP_ID; i++) {
+ size = (ptr->num_items+7)/8;
/* reached null entry */
if ((ptr->equip_id == 0) && (ptr->index == 0))
break;
@@ -1608,7 +1609,7 @@
if (driver->log_masks == NULL &&
(driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL)) == NULL)
goto err;
- driver->log_masks_length = 8*MAX_EQUIP_ID;
+ driver->log_masks_length = (sizeof(struct mask_info))*MAX_EQUIP_ID;
if (driver->event_masks == NULL &&
(driver->event_masks = kzalloc(EVENT_MASK_SIZE,
GFP_KERNEL)) == NULL)
diff --git a/drivers/misc/tzcom.c b/drivers/misc/tzcom.c
index 2b1484c..3b943c8 100644
--- a/drivers/misc/tzcom.c
+++ b/drivers/misc/tzcom.c
@@ -29,9 +29,13 @@
#include <linux/android_pmem.h>
#include <linux/io.h>
#include <linux/ion.h>
+#include <linux/tzcom.h>
+#include <linux/clk.h>
#include <mach/scm.h>
#include <mach/peripheral-loader.h>
-#include <linux/tzcom.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/socinfo.h>
#include "tzcomi.h"
#define TZCOM_DEV "tzcom"
@@ -51,6 +55,7 @@
__func__, current->pid, current->comm, ## args)
+static uint32_t tzcom_perf_client;
static struct class *driver_class;
static dev_t tzcom_device_no;
static struct cdev tzcom_cdev;
@@ -68,7 +73,9 @@
static DEFINE_MUTEX(sb_in_lock);
static DEFINE_MUTEX(sb_out_lock);
static DEFINE_MUTEX(send_cmd_lock);
-
+static DEFINE_MUTEX(tzcom_bw_mutex);
+static int tzcom_bw_count;
+static struct clk *tzcom_bus_clk;
struct tzcom_callback_list {
struct list_head list;
struct tzcom_callback callback;
@@ -94,6 +101,53 @@
atomic_t ioctl_count;
};
+static int tzcom_enable_bus_scaling(void)
+{
+ int ret = 0;
+ if (!tzcom_perf_client)
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(tzcom_bus_clk))
+ return -EINVAL;
+
+ mutex_lock(&tzcom_bw_mutex);
+ if (!tzcom_bw_count) {
+ ret = msm_bus_scale_client_update_request(
+ tzcom_perf_client, 1);
+ if (ret) {
+ pr_err("Bandwidth request failed (%d)\n", ret);
+ } else {
+ ret = clk_enable(tzcom_bus_clk);
+ if (ret)
+ pr_err("Clock enable failed\n");
+ }
+ }
+ if (ret)
+ msm_bus_scale_client_update_request(tzcom_perf_client, 0);
+ else
+ tzcom_bw_count++;
+ mutex_unlock(&tzcom_bw_mutex);
+ return ret;
+}
+
+static void tzcom_disable_bus_scaling(void)
+{
+ if (!tzcom_perf_client)
+ return ;
+
+ if (IS_ERR_OR_NULL(tzcom_bus_clk))
+ return ;
+
+ mutex_lock(&tzcom_bw_mutex);
+ if (tzcom_bw_count > 0)
+ if (tzcom_bw_count-- == 1) {
+ msm_bus_scale_client_update_request(tzcom_perf_client,
+ 0);
+ clk_disable(tzcom_bus_clk);
+ }
+ mutex_unlock(&tzcom_bw_mutex);
+}
+
static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len)
{
@@ -878,6 +932,9 @@
struct tzcom_data_t *tzcom_data;
PDEBUG("In here");
+
+ ret = tzcom_enable_bus_scaling();
+
if (pil == NULL) {
pil = pil_get("tzapps");
if (IS_ERR(pil)) {
@@ -1008,9 +1065,39 @@
}
PDEBUG("Freeing tzcom data");
kfree(tzcom_data);
+ tzcom_disable_bus_scaling();
return 0;
}
+static struct msm_bus_paths tzcom_bw_table[] = {
+ {
+ .vectors = (struct msm_bus_vectors[]){
+ {
+ .src = MSM_BUS_MASTER_SPS,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ },
+ },
+ .num_paths = 1,
+ },
+ {
+ .vectors = (struct msm_bus_vectors[]){
+ {
+ .src = MSM_BUS_MASTER_SPS,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ib = (492 * 8) * 1000000UL,
+ .ab = (492 * 8) * 100000UL,
+ },
+ },
+ .num_paths = 1,
+ },
+
+};
+
+static struct msm_bus_scale_pdata tzcom_bus_pdata = {
+ .usecase = tzcom_bw_table,
+ .num_usecases = ARRAY_SIZE(tzcom_bw_table),
+ .name = "tzcom",
+};
static const struct file_operations tzcom_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tzcom_ioctl,
@@ -1098,6 +1185,18 @@
/* Initialized in tzcom_open */
pil = NULL;
+ tzcom_perf_client = msm_bus_scale_register_client(
+ &tzcom_bus_pdata);
+ if (!tzcom_perf_client)
+ pr_err("Unable to register bus client");
+
+ tzcom_bus_clk = clk_get(class_dev, "bus_clk");
+ if (IS_ERR(tzcom_bus_clk)) {
+ tzcom_bus_clk = NULL;
+ } else if (tzcom_bus_clk != NULL) {
+ pr_debug("Enabled DFAB clock\n");
+ clk_set_rate(tzcom_bus_clk, 64000000);
+ }
return 0;
class_device_destroy:
@@ -1132,6 +1231,7 @@
pil_put(pil);
pil = NULL;
}
+ clk_put(tzcom_bus_clk);
device_destroy(driver_class, tzcom_device_no);
class_destroy(driver_class);
unregister_chrdev_region(tzcom_device_no, 1);
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index 78757f9..9b8ea48 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -258,10 +258,10 @@
struct delayed_work eoc_work;
struct work_struct unplug_ovp_fet_open_work;
struct delayed_work unplug_check_work;
- struct wake_lock unplug_ovp_fet_open_wake_lock;
struct wake_lock eoc_wake_lock;
enum pm8921_chg_cold_thr cold_thr;
enum pm8921_chg_hot_thr hot_thr;
+ int rconn_mohm;
};
static int usb_max_current;
@@ -1728,6 +1728,7 @@
chip->usb_present = usb_present;
power_supply_changed(&chip->usb_psy);
power_supply_changed(&chip->batt_psy);
+ pm8921_bms_calibrate_hkadc();
}
if (usb_present) {
schedule_delayed_work(&chip->unplug_check_work,
@@ -2389,6 +2390,80 @@
round_jiffies_relative(msecs_to_jiffies
(chip->update_time)));
}
+#define VDD_LOOP_ACTIVE_BIT BIT(3)
+#define VDD_MAX_INCREASE_MV 400
+static int vdd_max_increase_mv = VDD_MAX_INCREASE_MV;
+module_param(vdd_max_increase_mv, int, 0644);
+
+static int ichg_threshold_ua = -400000;
+module_param(ichg_threshold_ua, int, 0644);
+static void adjust_vdd_max_for_fastchg(struct pm8921_chg_chip *chip)
+{
+ int ichg_meas_ua, vbat_uv;
+ int ichg_meas_ma;
+ int adj_vdd_max_mv, programmed_vdd_max;
+ int vbat_batt_terminal_uv;
+ int vbat_batt_terminal_mv;
+ int reg_loop;
+ int delta_mv = 0;
+
+ if (chip->rconn_mohm == 0) {
+ pr_debug("Exiting as rconn_mohm is 0\n");
+ return;
+ }
+ /* adjust vdd_max only in normal temperature zone */
+ if (chip->is_bat_cool || chip->is_bat_warm) {
+ pr_debug("Exiting is_bat_cool = %d is_batt_warm = %d\n",
+ chip->is_bat_cool, chip->is_bat_warm);
+ return;
+ }
+
+ reg_loop = pm_chg_get_regulation_loop(chip);
+ if (!(reg_loop & VDD_LOOP_ACTIVE_BIT)) {
+ pr_debug("Exiting Vdd loop is not active reg loop=0x%x\n",
+ reg_loop);
+ return;
+ }
+
+ pm8921_bms_get_simultaneous_battery_voltage_and_current(&ichg_meas_ua,
+ &vbat_uv);
+ if (ichg_meas_ua >= 0) {
+ pr_debug("Exiting ichg_meas_ua = %d > 0\n", ichg_meas_ua);
+ return;
+ }
+ if (ichg_meas_ua <= ichg_threshold_ua) {
+ pr_debug("Exiting ichg_meas_ua = %d < ichg_threshold_ua = %d\n",
+ ichg_meas_ua, ichg_threshold_ua);
+ return;
+ }
+ ichg_meas_ma = ichg_meas_ua / 1000;
+
+ /* rconn_mohm is in milliOhms */
+ vbat_batt_terminal_uv = vbat_uv + ichg_meas_ma * the_chip->rconn_mohm;
+ vbat_batt_terminal_mv = vbat_batt_terminal_uv/1000;
+ pm_chg_vddmax_get(the_chip, &programmed_vdd_max);
+
+ delta_mv = chip->max_voltage_mv - vbat_batt_terminal_mv;
+
+ adj_vdd_max_mv = programmed_vdd_max + delta_mv;
+ pr_debug("vdd_max needs to be changed by %d mv from %d to %d\n",
+ delta_mv,
+ programmed_vdd_max,
+ adj_vdd_max_mv);
+
+ if (adj_vdd_max_mv < chip->max_voltage_mv) {
+ pr_debug("adj vdd_max lower than default max voltage\n");
+ return;
+ }
+
+ if (adj_vdd_max_mv > (chip->max_voltage_mv + vdd_max_increase_mv))
+ adj_vdd_max_mv = chip->max_voltage_mv + vdd_max_increase_mv;
+
+ pr_debug("adjusting vdd_max_mv to %d to make "
+ "vbat_batt_termial_uv = %d to %d\n",
+ adj_vdd_max_mv, vbat_batt_terminal_uv, chip->max_voltage_mv);
+ pm_chg_vddmax_set(chip, adj_vdd_max_mv);
+}
enum {
CHG_IN_PROGRESS,
@@ -2442,8 +2517,6 @@
}
pr_debug("vddmax = %d vbat_meas_mv=%d\n",
vbat_programmed, vbat_meas_mv);
- if (vbat_meas_mv < vbat_programmed - VBAT_TOLERANCE_MV)
- return CHG_IN_PROGRESS;
if (last_vbat_programmed == -EINVAL)
last_vbat_programmed = vbat_programmed;
@@ -2455,10 +2528,6 @@
return CHG_IN_PROGRESS;
}
- /*
- * TODO if charging from an external charger
- * check SOC instead of regulation loop
- */
regulation_loop = pm_chg_get_regulation_loop(chip);
if (regulation_loop < 0) {
pr_err("couldnt read the regulation loop err=%d\n",
@@ -2518,10 +2587,9 @@
end = is_charging_finished(chip);
if (end == CHG_NOT_IN_PROGRESS) {
- /* enable fastchg irq */
- count = 0;
- wake_unlock(&chip->eoc_wake_lock);
- return;
+ count = 0;
+ wake_unlock(&chip->eoc_wake_lock);
+ return;
}
if (end == CHG_FINISHED) {
@@ -2547,6 +2615,7 @@
chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip);
wake_unlock(&chip->eoc_wake_lock);
} else {
+ adjust_vdd_max_for_fastchg(chip);
pr_debug("EOC count = %d\n", count);
schedule_delayed_work(&chip->eoc_work,
round_jiffies_relative(msecs_to_jiffies
@@ -2687,6 +2756,23 @@
module_param_call(disabled, set_disable_status_param, param_get_uint,
&charging_disabled, 0644);
+static int rconn_mohm;
+static int set_rconn_mohm(const char *val, struct kernel_param *kp)
+{
+ int ret;
+ struct pm8921_chg_chip *chip = the_chip;
+
+ ret = param_set_int(val, kp);
+ if (ret) {
+ pr_err("error setting value %d\n", ret);
+ return ret;
+ }
+ if (chip)
+ chip->rconn_mohm = rconn_mohm;
+ return 0;
+}
+module_param_call(rconn_mohm, set_rconn_mohm, param_get_uint,
+ &rconn_mohm, 0644);
/**
* set_thermal_mitigation_level -
*
@@ -3004,6 +3090,7 @@
static int __devinit pm8921_chg_hw_init(struct pm8921_chg_chip *chip)
{
int rc;
+ int vdd_safe;
rc = pm_chg_masked_write(chip, SYS_CONFIG_2,
BOOT_DONE_BIT, BOOT_DONE_BIT);
@@ -3012,7 +3099,13 @@
return rc;
}
- rc = pm_chg_vddsafe_set(chip, chip->max_voltage_mv);
+ vdd_safe = chip->max_voltage_mv + VDD_MAX_INCREASE_MV;
+
+ if (vdd_safe > PM8921_CHG_VDDSAFE_MAX)
+ vdd_safe = PM8921_CHG_VDDSAFE_MAX;
+
+ rc = pm_chg_vddsafe_set(chip, vdd_safe);
+
if (rc) {
pr_err("Failed to set safe voltage to %d rc=%d\n",
chip->max_voltage_mv, rc);
@@ -3505,6 +3598,7 @@
chip->cold_thr = pdata->cold_thr;
chip->hot_thr = pdata->hot_thr;
+ chip->rconn_mohm = pdata->rconn_mohm;
rc = pm8921_chg_hw_init(chip);
if (rc) {
@@ -3556,8 +3650,6 @@
the_chip = chip;
wake_lock_init(&chip->eoc_wake_lock, WAKE_LOCK_SUSPEND, "pm8921_eoc");
- wake_lock_init(&chip->unplug_ovp_fet_open_wake_lock,
- WAKE_LOCK_SUSPEND, "pm8921_unplug_wrkarnd");
INIT_DELAYED_WORK(&chip->eoc_work, eoc_worker);
INIT_WORK(&chip->unplug_ovp_fet_open_work,
unplug_ovp_fet_open_worker);
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 7a9d57d..8a97a6e 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -15,169 +15,43 @@
#ifndef __GADGET_CHIPS_H
#define __GADGET_CHIPS_H
-#ifdef CONFIG_USB_GADGET_NET2280
-#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
-#else
-#define gadget_is_net2280(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_AMD5536UDC
-#define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name)
-#else
-#define gadget_is_amd5536udc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_DUMMY_HCD
-#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
-#else
-#define gadget_is_dummy(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_PXA25X
-#define gadget_is_pxa(g) !strcmp("pxa25x_udc", (g)->name)
-#else
-#define gadget_is_pxa(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_GOKU
-#define gadget_is_goku(g) !strcmp("goku_udc", (g)->name)
-#else
-#define gadget_is_goku(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_OMAP
-#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
-#else
-#define gadget_is_omap(g) 0
-#endif
-
-/* various unstable versions available */
-#ifdef CONFIG_USB_GADGET_PXA27X
-#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
-#else
-#define gadget_is_pxa27x(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_ATMEL_USBA
-#define gadget_is_atmel_usba(g) !strcmp("atmel_usba_udc", (g)->name)
-#else
-#define gadget_is_atmel_usba(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_S3C2410
-#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
-#else
-#define gadget_is_s3c2410(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_AT91
-#define gadget_is_at91(g) !strcmp("at91_udc", (g)->name)
-#else
-#define gadget_is_at91(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_IMX
-#define gadget_is_imx(g) !strcmp("imx_udc", (g)->name)
-#else
-#define gadget_is_imx(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_FSL_USB2
-#define gadget_is_fsl_usb2(g) !strcmp("fsl-usb2-udc", (g)->name)
-#else
-#define gadget_is_fsl_usb2(g) 0
-#endif
-
-/* Mentor high speed "dual role" controller, in peripheral role */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-#define gadget_is_musbhdrc(g) !strcmp("musb-hdrc", (g)->name)
-#else
-#define gadget_is_musbhdrc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_LANGWELL
-#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
-#else
-#define gadget_is_langwell(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_M66592
-#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
-#else
-#define gadget_is_m66592(g) 0
-#endif
-
-/* Freescale CPM/QE UDC SUPPORT */
-#ifdef CONFIG_USB_GADGET_FSL_QE
-#define gadget_is_fsl_qe(g) !strcmp("fsl_qe_udc", (g)->name)
-#else
-#define gadget_is_fsl_qe(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_PCI
-#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
-#else
-#define gadget_is_ci13xxx_pci(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_DWC3
-#define gadget_is_dwc3(g) (!strcmp("dwc3-gadget", (g)->name))
-#else
-#define gadget_is_dwc3(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_MSM_72K
-#define gadget_is_msm72k(g) !strcmp("msm72k_udc", (g)->name)
-#else
-#define gadget_is_msm72k(g) 0
-#endif
-
-// CONFIG_USB_GADGET_SX2
-// CONFIG_USB_GADGET_AU1X00
-// ...
-
-#ifdef CONFIG_USB_GADGET_R8A66597
-#define gadget_is_r8a66597(g) !strcmp("r8a66597_udc", (g)->name)
-#else
-#define gadget_is_r8a66597(g) 0
-#endif
-
-#ifdef CONFIG_USB_S3C_HSOTG
-#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name))
-#else
-#define gadget_is_s3c_hsotg(g) 0
-#endif
-
-#ifdef CONFIG_USB_S3C_HSUDC
-#define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
-#else
-#define gadget_is_s3c_hsudc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_EG20T
-#define gadget_is_pch(g) (!strcmp("pch_udc", (g)->name))
-#else
-#define gadget_is_pch(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_MSM
+/*
+ * NOTICE: the entries below are alphabetical and should be kept
+ * that way.
+ *
+ * Always be sure to add new entries to the correct position or
+ * accept the bashing later.
+ *
+ * If you have forgotten the alphabetical order let VIM/EMACS
+ * do that for you.
+ */
+#define gadget_is_amd5536udc(g) (!strcmp("amd5536udc", (g)->name))
+#define gadget_is_at91(g) (!strcmp("at91_udc", (g)->name))
+#define gadget_is_atmel_usba(g) (!strcmp("atmel_usba_udc", (g)->name))
#define gadget_is_ci13xxx_msm(g) (!strcmp("ci13xxx_msm", (g)->name))
-#else
-#define gadget_is_ci13xxx_msm(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_MSM_HSIC
-#define gadget_is_ci13xxx_msm_hsic(g) \
- (!strncmp("ci13xxx_msm_hsic", (g)->name, 16))
-#else
-#define gadget_is_ci13xxx_msm_hsic(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_RENESAS_USBHS
-#define gadget_is_renesas_usbhs(g) (!strcmp("renesas_usbhs_udc", (g)->name))
-#else
-#define gadget_is_renesas_usbhs(g) 0
-#endif
+#define gadget_is_ci13xxx_msm_hsic(g) (!strcmp("ci13xxx_msm_hsic", (g)->name))
+#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
+#define gadget_is_dummy(g) (!strcmp("dummy_udc", (g)->name))
+#define gadget_is_dwc3(g) (!strcmp("dwc3-gadget", (g)->name))
+#define gadget_is_fsl_qe(g) (!strcmp("fsl_qe_udc", (g)->name))
+#define gadget_is_fsl_usb2(g) (!strcmp("fsl-usb2-udc", (g)->name))
+#define gadget_is_goku(g) (!strcmp("goku_udc", (g)->name))
+#define gadget_is_imx(g) (!strcmp("imx_udc", (g)->name))
+#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
+#define gadget_is_m66592(g) (!strcmp("m66592_udc", (g)->name))
+#define gadget_is_msm72k(g) (!strcmp("msm72k_udc", (g)->name))
+#define gadget_is_musbhdrc(g) (!strcmp("musb-hdrc", (g)->name))
+#define gadget_is_net2272(g) (!strcmp("net2272", (g)->name))
+#define gadget_is_net2280(g) (!strcmp("net2280", (g)->name))
+#define gadget_is_omap(g) (!strcmp("omap_udc", (g)->name))
+#define gadget_is_pch(g) (!strcmp("pch_udc", (g)->name))
+#define gadget_is_pxa(g) (!strcmp("pxa25x_udc", (g)->name))
+#define gadget_is_pxa27x(g) (!strcmp("pxa27x_udc", (g)->name))
+#define gadget_is_r8a66597(g) (!strcmp("r8a66597_udc", (g)->name))
+#define gadget_is_renesas_usbhs(g) (!strcmp("renesas_usbhs_udc", (g)->name))
+#define gadget_is_s3c2410(g) (!strcmp("s3c2410_udc", (g)->name))
+#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name))
+#define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
/**
* usb_gadget_controller_number - support bcdDevice id convention
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 579899c..e451cee 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -498,7 +498,13 @@
outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
- outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
+ if (format & MDP4_FORMAT_SOLID_FILL) {
+ u32 op_mode = pipe->op_mode;
+ op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
+ op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
+ outpdw(rgb_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
+ } else
+ outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(rgb_base + 0x005c, pipe->phasex_step);
outpdw(rgb_base + 0x0060, pipe->phasey_step);
@@ -670,7 +676,13 @@
outpdw(vg_base + 0x0050, format); /* MDP_RGB_SRC_FORMAT */
outpdw(vg_base + 0x0054, pattern); /* MDP_RGB_SRC_UNPACK_PATTERN */
- outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
+ if (format & MDP4_FORMAT_SOLID_FILL) {
+ u32 op_mode = pipe->op_mode;
+ op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
+ op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
+ outpdw(vg_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
+ } else
+ outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(vg_base + 0x005c, pipe->phasex_step);
outpdw(vg_base + 0x0060, pipe->phasey_step);
@@ -1494,11 +1506,21 @@
if (pipe->is_fg) {
if (pipe->alpha == 0xff &&
bg_pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ u32 op_mode;
pnum = bg_pipe->pipe_num - OVERLAY_PIPE_RGB1;
rgb_base = MDP_BASE + MDP4_RGB_BASE;
rgb_base += MDP4_RGB_OFF * pnum;
rgb_src_format = inpdw(rgb_base + 0x50);
rgb_src_format |= MDP4_FORMAT_SOLID_FILL;
+ /*
+ * If solid fill is enabled, flip and scale
+ * have to be disabled. otherwise, h/w
+ * underruns.
+ */
+ op_mode = inpdw(rgb_base + 0x0058);
+ op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
+ op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
+ outpdw(rgb_base + 0x0058, op_mode);
outpdw(rgb_base + 0x50, rgb_src_format);
outpdw(rgb_base + 0x1008, constant_color);
}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
index 1e02a2c..eea902d 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
@@ -164,14 +164,20 @@
DDL_MSG_ERROR("ddl_open:Client_trasac_failed");
return status;
}
- ddl->shared_mem[0].mem_type = DDL_CMD_MEM;
+ if (res_trk_check_for_sec_session())
+ ddl->shared_mem[0].mem_type = DDL_CMD_MEM;
+ else
+ ddl->shared_mem[0].mem_type = DDL_FW_MEM;
ptr = ddl_pmem_alloc(&ddl->shared_mem[0],
DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0);
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
if (!status && ddl_context->frame_channel_depth
== VCD_DUAL_FRAME_COMMAND_CHANNEL) {
- ddl->shared_mem[1].mem_type = DDL_CMD_MEM;
+ if (res_trk_check_for_sec_session())
+ ddl->shared_mem[1].mem_type = DDL_CMD_MEM;
+ else
+ ddl->shared_mem[1].mem_type = DDL_FW_MEM;
ptr = ddl_pmem_alloc(&ddl->shared_mem[1],
DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0);
if (!ptr) {
@@ -289,6 +295,11 @@
DDL_MSG_ERROR("ddl_enc_start:Seq_hdr_alloc_failed");
return VCD_ERR_ALLOC_FAIL;
}
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ encoder->seq_header.alloc_handle,
+ encoder->seq_header.virtual_base_addr,
+ encoder->seq_header.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
if (!ddl_take_command_channel(ddl_context, ddl, client_data))
return VCD_ERR_BUSY;
ddl_vidc_channel_set(ddl);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 75df48d..1da70bc 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -14,7 +14,7 @@
#include <mach/msm_memtypes.h>
#include "vcd_ddl.h"
#include "vcd_ddl_shared_mem.h"
-
+#include "vcd_res_tracker_api.h"
struct ddl_context *ddl_get_context(void)
{
@@ -639,6 +639,7 @@
u32 status = VCD_S_SUCCESS, dpb = 0;
u32 width = 0, height = 0;
u8 *ptr;
+ struct ddl_context *ddl_context = ddl->ddl_context;
dec_bufs = &ddl->codec_data.decoder.hw_bufs;
ddl_calc_dec_hw_buffers_size(ddl->codec_data.decoder.
@@ -649,6 +650,11 @@
DDL_KILO_BYTE(2));
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ dec_bufs->context.alloc_handle,
+ dec_bufs->context.virtual_base_addr,
+ dec_bufs->context.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
}
if (buf_size.sz_nb_ip > 0) {
dec_bufs->h264_nb_ip.mem_type = DDL_MM_MEM;
@@ -726,9 +732,15 @@
DDL_KILO_BYTE(2));
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
- else
+ else {
memset(dec_bufs->desc.align_virtual_addr,
0, buf_size.sz_desc);
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ dec_bufs->desc.alloc_handle,
+ dec_bufs->desc.alloc_handle,
+ dec_bufs->desc.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
+ }
}
if (status)
ddl_free_dec_hw_buffers(ddl);
@@ -830,6 +842,7 @@
struct ddl_enc_buffer_size buf_size;
void *ptr;
u32 status = VCD_S_SUCCESS;
+ struct ddl_context *ddl_context = ddl->ddl_context;
enc_bufs = &ddl->codec_data.encoder.hw_bufs;
enc_bufs->dpb_count = DDL_ENC_MIN_DPB_BUFFERS;
@@ -908,6 +921,11 @@
buf_size.sz_context, DDL_KILO_BYTE(2));
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ enc_bufs->context.alloc_handle,
+ enc_bufs->context.virtual_base_addr,
+ enc_bufs->context.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
}
if (status)
ddl_free_enc_hw_buffers(ddl);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
index 7ccf4c2..6aa7451 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
@@ -23,7 +23,6 @@
};
static struct time_data proc_time[MAX_TIME_DATA];
#define DDL_MSG_TIME(x...) printk(KERN_DEBUG x)
-
static unsigned int vidc_mmu_subsystem[] = {
MSM_SUBSYSTEM_VIDEO, MSM_SUBSYSTEM_VIDEO_FWARE};
@@ -37,13 +36,16 @@
#endif
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
- u32 alloc_size, offset = 0, flags = 0;
+ u32 alloc_size, offset = 0 ;
u32 index = 0;
struct ddl_context *ddl_context;
struct msm_mapped_buffer *mapped_buffer = NULL;
- int rc = -EINVAL;
- ion_phys_addr_t phyaddr = 0;
- size_t len = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+ unsigned long *kernel_vaddr = NULL;
+ unsigned long ionflag = 0;
+ unsigned long flags = 0;
+ int ret = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
@@ -61,6 +63,7 @@
__func__);
goto bail_out;
}
+ alloc_size = (alloc_size+4095) & ~4095;
addr->alloc_handle = ion_alloc(
ddl_context->video_ion_client, alloc_size, SZ_4K,
res_trk_get_mem_type());
@@ -69,15 +72,48 @@
__func__);
goto bail_out;
}
- rc = ion_phys(ddl_context->video_ion_client,
- addr->alloc_handle, &phyaddr,
- &len);
- if (rc || !phyaddr) {
+ if (res_trk_check_for_sec_session() ||
+ addr->mem_type == DDL_FW_MEM)
+ ionflag = UNCACHED;
+ else
+ ionflag = CACHED;
+ kernel_vaddr = (unsigned long *) ion_map_kernel(
+ ddl_context->video_ion_client,
+ addr->alloc_handle, ionflag);
+ if (IS_ERR_OR_NULL(kernel_vaddr)) {
+ DDL_MSG_ERROR("%s() :DDL ION map failed\n",
+ __func__);
+ goto free_ion_alloc;
+ }
+ addr->virtual_base_addr = (u8 *) kernel_vaddr;
+ ret = ion_map_iommu(ddl_context->video_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_4K,
+ 0,
+ &iova,
+ &buffer_size,
+ UNCACHED, 0);
+ if (ret) {
+ DDL_MSG_ERROR("%s():DDL ION ion map iommu failed\n",
+ __func__);
+ goto unmap_ion_alloc;
+ }
+ addr->alloced_phys_addr = (phys_addr_t) iova;
+ if (!addr->alloced_phys_addr) {
DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
__func__);
- goto free_acm_ion_alloc;
+ goto unmap_ion_alloc;
}
- addr->alloced_phys_addr = phyaddr;
+ addr->mapped_buffer = NULL;
+ addr->physical_base_addr = (u8 *) iova;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ addr->physical_base_addr, alignment);
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = alloc_size;
} else {
addr->alloced_phys_addr = (phys_addr_t)
allocate_contiguous_memory_nomap(alloc_size,
@@ -87,51 +123,52 @@
__func__, alloc_size);
goto bail_out;
}
- }
- flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
- if (alignment == DDL_KILO_BYTE(128))
- index = 1;
- else if (alignment > SZ_4K)
- flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+ flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
+ if (alignment == DDL_KILO_BYTE(128))
+ index = 1;
+ else if (alignment > SZ_4K)
+ flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
- addr->mapped_buffer =
- msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
- alloc_size, flags, &vidc_mmu_subsystem[index],
- sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
- if (IS_ERR(addr->mapped_buffer)) {
- pr_err(" %s() buffer map failed", __func__);
- goto free_acm_ion_alloc;
+ addr->mapped_buffer =
+ msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
+ alloc_size, flags, &vidc_mmu_subsystem[index],
+ sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
+ if (IS_ERR(addr->mapped_buffer)) {
+ pr_err(" %s() buffer map failed", __func__);
+ goto free_acm_alloc;
+ }
+ mapped_buffer = addr->mapped_buffer;
+ if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
+ pr_err("%s() map buffers failed\n", __func__);
+ goto free_map_buffers;
+ }
+ addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
+ addr->virtual_base_addr = mapped_buffer->vaddr;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ addr->physical_base_addr, alignment);
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = sz;
}
- mapped_buffer = addr->mapped_buffer;
- if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
- pr_err("%s() map buffers failed\n", __func__);
- goto free_map_buffers;
- }
- addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
- addr->virtual_base_addr = mapped_buffer->vaddr;
- addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
- addr->physical_base_addr, alignment);
- offset = (u32)(addr->align_physical_addr -
- addr->physical_base_addr);
- addr->align_virtual_addr = addr->virtual_base_addr + offset;
- addr->buffer_size = sz;
return addr->virtual_base_addr;
-
free_map_buffers:
msm_subsystem_unmap_buffer(addr->mapped_buffer);
addr->mapped_buffer = NULL;
-free_acm_ion_alloc:
- if (ddl_context->video_ion_client) {
- if (addr->alloc_handle) {
- ion_free(ddl_context->video_ion_client,
- addr->alloc_handle);
- addr->alloc_handle = NULL;
- }
- } else {
+free_acm_alloc:
free_contiguous_memory_by_paddr(
(unsigned long)addr->alloced_phys_addr);
addr->alloced_phys_addr = (phys_addr_t)NULL;
- }
+ return NULL;
+unmap_ion_alloc:
+ ion_unmap_kernel(ddl_context->video_ion_client,
+ addr->alloc_handle);
+ addr->virtual_base_addr = NULL;
+ addr->alloced_phys_addr = (phys_addr_t)NULL;
+free_ion_alloc:
+ ion_free(ddl_context->video_ion_client,
+ addr->alloc_handle);
+ addr->alloc_handle = NULL;
bail_out:
return NULL;
}
@@ -146,16 +183,22 @@
}
if (ddl_context->video_ion_client) {
if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
+ ion_unmap_kernel(ddl_context->video_ion_client,
+ addr->alloc_handle);
+ ion_unmap_iommu(ddl_context->video_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(ddl_context->video_ion_client,
addr->alloc_handle);
- }
+ }
} else {
+ if (addr->mapped_buffer)
+ msm_subsystem_unmap_buffer(addr->mapped_buffer);
if (addr->alloced_phys_addr)
free_contiguous_memory_by_paddr(
(unsigned long)addr->alloced_phys_addr);
}
- if (addr->mapped_buffer)
- msm_subsystem_unmap_buffer(addr->mapped_buffer);
memset(addr, 0, sizeof(struct ddl_buf_addr));
}
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index 2f26e01..7e9ac40 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -58,40 +58,88 @@
u32 index = 0;
struct ddl_context *ddl_context;
struct msm_mapped_buffer *mapped_buffer = NULL;
- ddl_context = ddl_get_context();
- if (!addr->alloced_phys_addr) {
- pr_err(" %s() alloced addres NULL", __func__);
- goto bail_out;
- }
- flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
- if (alignment == DDL_KILO_BYTE(128))
- index = 1;
- else if (alignment > SZ_4K)
- flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+ int ret = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+ unsigned long *kernel_vaddr = NULL;
- addr->mapped_buffer =
- msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
- sz, flags, &restrk_mmu_subsystem[index],
- sizeof(restrk_mmu_subsystem[index])/sizeof(unsigned int));
- if (IS_ERR(addr->mapped_buffer)) {
- pr_err(" %s() buffer map failed", __func__);
- goto bail_out;
- }
- mapped_buffer = addr->mapped_buffer;
- if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
- pr_err("%s() map buffers failed\n", __func__);
- goto bail_out;
- }
- addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
- addr->virtual_base_addr = mapped_buffer->vaddr;
- addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ ddl_context = ddl_get_context();
+ if (res_trk_get_enable_ion()) {
+ kernel_vaddr = (unsigned long *) ion_map_kernel(
+ ddl_context->video_ion_client,
+ addr->alloc_handle, UNCACHED);
+ if (IS_ERR_OR_NULL(kernel_vaddr)) {
+ DDL_MSG_ERROR("%s():DDL ION client map failed\n",
+ __func__);
+ goto ion_bail_out;
+ }
+ addr->virtual_base_addr = (u8 *) kernel_vaddr;
+ ret = ion_map_iommu(ddl_context->video_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_FIRMWARE_POOL,
+ SZ_4K,
+ 0,
+ &iova,
+ &buffer_size,
+ UNCACHED, 0);
+ if (ret) {
+ DDL_MSG_ERROR("%s():DDL ION client iommu map failed\n",
+ __func__);
+ goto ion_unmap_bail_out;
+ }
+ addr->mapped_buffer = NULL;
+ addr->physical_base_addr = (u8 *)iova;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
addr->physical_base_addr, alignment);
- offset = (u32)(addr->align_physical_addr -
- addr->physical_base_addr);
- addr->align_virtual_addr = addr->virtual_base_addr + offset;
- addr->buffer_size = sz;
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = buffer_size;
+ } else {
+ if (!addr->alloced_phys_addr) {
+ pr_err(" %s() alloced addres NULL", __func__);
+ goto bail_out;
+ }
+ flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
+ if (alignment == DDL_KILO_BYTE(128))
+ index = 1;
+ else if (alignment > SZ_4K)
+ flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+
+ addr->mapped_buffer =
+ msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
+ sz, flags, &restrk_mmu_subsystem[index],
+ sizeof(restrk_mmu_subsystem[index])/sizeof(unsigned int));
+ if (IS_ERR(addr->mapped_buffer)) {
+ pr_err(" %s() buffer map failed", __func__);
+ goto bail_out;
+ }
+ mapped_buffer = addr->mapped_buffer;
+ if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
+ pr_err("%s() map buffers failed\n", __func__);
+ goto bail_out;
+ }
+ addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
+ addr->virtual_base_addr = mapped_buffer->vaddr;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ addr->physical_base_addr, alignment);
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = sz;
+ }
return addr->virtual_base_addr;
bail_out:
+ if (addr->mapped_buffer)
+ msm_subsystem_unmap_buffer(addr->mapped_buffer);
+ return NULL;
+ion_unmap_bail_out:
+ if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
+ ion_unmap_kernel(resource_context.
+ res_ion_client, addr->alloc_handle);
+ }
+ion_bail_out:
return NULL;
}
@@ -100,9 +148,6 @@
{
u32 alloc_size;
struct ddl_context *ddl_context;
- int rc = -EINVAL;
- ion_phys_addr_t phyaddr = 0;
- size_t len = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
@@ -120,23 +165,16 @@
__func__);
goto bail_out;
}
+ alloc_size = (alloc_size+4095) & ~4095;
addr->alloc_handle = ion_alloc(
ddl_context->video_ion_client, alloc_size, SZ_4K,
res_trk_get_mem_type());
if (IS_ERR_OR_NULL(addr->alloc_handle)) {
DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
__func__);
- goto bail_out;
- }
- rc = ion_phys(ddl_context->video_ion_client,
- addr->alloc_handle, &phyaddr,
- &len);
- if (rc || !phyaddr) {
- DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
- __func__);
goto free_acm_ion_alloc;
}
- addr->alloced_phys_addr = phyaddr;
+ return (void *) addr->alloc_handle;
} else {
addr->alloced_phys_addr = (phys_addr_t)
allocate_contiguous_memory_nomap(alloc_size,
@@ -146,10 +184,10 @@
__func__, alloc_size);
goto bail_out;
}
+ addr->buffer_size = sz;
+ return (void *)addr->alloced_phys_addr;
}
- addr->buffer_size = sz;
- return (void *)addr->alloced_phys_addr;
free_acm_ion_alloc:
if (ddl_context->video_ion_client) {
@@ -169,7 +207,18 @@
pr_err("%s() invalid args\n", __func__);
return;
}
- if (addr->mapped_buffer)
+ if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
+ if (addr->physical_base_addr) {
+ ion_unmap_kernel(resource_context.res_ion_client,
+ addr->alloc_handle);
+ ion_unmap_iommu(resource_context.res_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_FIRMWARE_POOL);
+ addr->virtual_base_addr = NULL;
+ addr->physical_base_addr = NULL;
+ }
+ } else if (addr->mapped_buffer)
msm_subsystem_unmap_buffer(addr->mapped_buffer);
addr->mapped_buffer = NULL;
}
@@ -628,8 +677,8 @@
int mem_type = -1;
switch (resource_context.res_mem_type) {
case DDL_FW_MEM:
- mem_type = resource_context.fw_mem_type;
- break;
+ mem_type = ION_HEAP(resource_context.fw_mem_type);
+ return mem_type;
case DDL_MM_MEM:
mem_type = resource_context.memtype;
break;
@@ -646,7 +695,8 @@
if (res_trk_check_for_sec_session())
mem_type = (ION_HEAP(mem_type) | ION_SECURE);
else
- mem_type = ION_HEAP(mem_type);
+ mem_type = (ION_HEAP(mem_type) |
+ ION_HEAP(ION_IOMMU_HEAP_ID));
}
return mem_type;
}
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index cffb0a9..356a4ae 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -812,7 +812,9 @@
u32 len = 0, flags = 0;
struct file *file;
int rc = 0;
- unsigned long ionflag;
+ unsigned long ionflag = 0;
+ unsigned long buffer_size = 0;
+ unsigned long iova = 0;
if (!client_ctx || !mv_data)
return false;
@@ -839,13 +841,25 @@
return false;
}
put_pmem_file(file);
+ flags = MSM_SUBSYSTEM_MAP_IOVA;
+ mapped_buffer = msm_subsystem_map_buffer(
+ (unsigned long)vcd_h264_mv_buffer->physical_addr, len,
+ flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/
+ sizeof(unsigned int));
+ if (IS_ERR(mapped_buffer)) {
+ pr_err("buffer map failed");
+ return false;
+ }
+ vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
+ vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
client_ctx->h264_mv_ion_handle = ion_import_fd(
client_ctx->user_ion_client,
vcd_h264_mv_buffer->pmem_fd);
if (!client_ctx->h264_mv_ion_handle) {
ERR("%s(): get_ION_handle failed\n", __func__);
- goto ion_error;
+ goto import_ion_error;
}
rc = ion_handle_get_flags(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle,
@@ -853,7 +867,7 @@
if (rc) {
ERR("%s():get_ION_flags fail\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
vcd_h264_mv_buffer->kernel_virtual_addr = (u8 *) ion_map_kernel(
client_ctx->user_ion_client,
@@ -862,29 +876,22 @@
if (!vcd_h264_mv_buffer->kernel_virtual_addr) {
ERR("%s(): get_ION_kernel virtual addr failed\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
- rc = ion_phys(client_ctx->user_ion_client,
+ rc = ion_map_iommu(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle,
- (unsigned long *) (&(vcd_h264_mv_buffer->
- physical_addr)), &len);
+ VIDEO_DOMAIN, VIDEO_MAIN_POOL,
+ SZ_4K, 0, (unsigned long *)&iova,
+ (unsigned long *)&buffer_size, UNCACHED, 0);
if (rc) {
ERR("%s():get_ION_kernel physical addr fail\n",
__func__);
- goto ion_error;
+ goto ion_map_error;
}
+ vcd_h264_mv_buffer->physical_addr = (u8 *) iova;
+ vcd_h264_mv_buffer->client_data = NULL;
+ vcd_h264_mv_buffer->dev_addr = (u8 *) iova;
}
- flags = MSM_SUBSYSTEM_MAP_IOVA;
- mapped_buffer = msm_subsystem_map_buffer(
- (unsigned long)vcd_h264_mv_buffer->physical_addr, len,
- flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(mapped_buffer)) {
- pr_err("buffer map failed");
- return false;
- }
- vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
- vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
DBG("Virt: %p, Phys %p, fd: %d", vcd_h264_mv_buffer->
kernel_virtual_addr, vcd_h264_mv_buffer->physical_addr,
vcd_h264_mv_buffer->pmem_fd);
@@ -896,13 +903,14 @@
return false;
else
return true;
-ion_error:
+ion_map_error:
if (vcd_h264_mv_buffer->kernel_virtual_addr)
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
if (client_ctx->h264_mv_ion_handle)
ion_free(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
+import_ion_error:
return false;
}
@@ -973,6 +981,10 @@
if (client_ctx->h264_mv_ion_handle != NULL) {
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ client_ctx->h264_mv_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
}
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index cac5dc4..37f1001 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1766,10 +1766,11 @@
struct vcd_property_hdr vcd_property_hdr;
struct vcd_property_enc_recon_buffer *control = NULL;
struct msm_mapped_buffer *mapped_buffer = NULL;
- size_t ion_len = -1;
- unsigned long phy_addr;
int rc = -1;
- unsigned long ionflag;
+ unsigned long ionflag = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+
if (!client_ctx || !venc_recon) {
pr_err("%s() Invalid params", __func__);
return false;
@@ -1802,12 +1803,23 @@
return false;
}
put_pmem_file(file);
+ flags = MSM_SUBSYSTEM_MAP_IOVA;
+ mapped_buffer = msm_subsystem_map_buffer(
+ (unsigned long)control->physical_addr, len,
+ flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
+ if (IS_ERR(mapped_buffer)) {
+ pr_err("buffer map failed");
+ return false;
+ }
+ control->client_data = (void *) mapped_buffer;
+ control->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
client_ctx->recon_buffer_ion_handle[i] = ion_import_fd(
client_ctx->user_ion_client, control->pmem_fd);
if (IS_ERR_OR_NULL(client_ctx->recon_buffer_ion_handle[i])) {
ERR("%s(): get_ION_handle failed\n", __func__);
- goto ion_error;
+ goto import_ion_error;
}
rc = ion_handle_get_flags(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i],
@@ -1815,7 +1827,7 @@
if (rc) {
ERR("%s():get_ION_flags fail\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
control->kernel_virtual_addr = (u8 *) ion_map_kernel(
client_ctx->user_ion_client,
@@ -1824,30 +1836,27 @@
if (!control->kernel_virtual_addr) {
ERR("%s(): get_ION_kernel virtual addr fail\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
- rc = ion_phys(client_ctx->user_ion_client,
+ rc = ion_map_iommu(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i],
- &phy_addr, &ion_len);
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_4K,
+ 0,
+ (unsigned long *)&iova,
+ (unsigned long *)&buffer_size,
+ UNCACHED, 0);
if (rc) {
- ERR("%s():get_ION_kernel physical addr fail\n",
+ ERR("%s():ION map iommu addr fail\n",
__func__);
- goto ion_error;
+ goto map_ion_error;
}
- control->physical_addr = (u8 *) phy_addr;
- len = (unsigned long) ion_len;
+ control->physical_addr = (u8 *) iova;
+ len = buffer_size;
+ control->client_data = NULL;
+ control->dev_addr = (u8 *)iova;
}
- flags = MSM_SUBSYSTEM_MAP_IOVA;
- mapped_buffer = msm_subsystem_map_buffer(
- (unsigned long)control->physical_addr, len,
- flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(mapped_buffer)) {
- pr_err("buffer map failed");
- return false;
- }
- control->client_data = (void *) mapped_buffer;
- control->dev_addr = (u8 *)mapped_buffer->iova[0];
vcd_property_hdr.prop_id = VCD_I_RECON_BUFFERS;
vcd_property_hdr.sz =
@@ -1863,7 +1872,7 @@
__func__, vcd_status);
return false;
}
-ion_error:
+map_ion_error:
if (control->kernel_virtual_addr)
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
@@ -1871,6 +1880,7 @@
ion_free(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
client_ctx->recon_buffer_ion_handle[i] = NULL;
+import_ion_error:
return false;
}
@@ -1914,6 +1924,10 @@
if (client_ctx->recon_buffer_ion_handle[i]) {
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ client_ctx->recon_buffer_ion_handle[i],
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
client_ctx->recon_buffer_ion_handle[i] = NULL;
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index 0ea64d4..cd128dd 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -415,6 +415,10 @@
if (buf_addr_table[i].buff_ion_handle) {
ion_unmap_kernel(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ buf_addr_table[i].buff_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
buf_addr_table[i].buff_ion_handle = NULL;
@@ -428,6 +432,10 @@
if (client_ctx->h264_mv_ion_handle) {
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ client_ctx->h264_mv_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
client_ctx->h264_mv_ion_handle = NULL;
@@ -530,9 +538,11 @@
u32 i, flags;
struct buf_addr_table *buf_addr_table;
struct msm_mapped_buffer *mapped_buffer = NULL;
- size_t ion_len;
struct ion_handle *buff_ion_handle = NULL;
unsigned long ionflag = 0;
+ unsigned long iova = 0;
+ int ret = 0;
+ unsigned long buffer_size = 0;
if (!client_ctx || !length)
return false;
@@ -548,6 +558,7 @@
num_of_buffers = &client_ctx->num_of_output_buffers;
DBG("%s(): buffer = OUTPUT #Buf = %d\n",
__func__, *num_of_buffers);
+ length = length * 2; /* workaround for iommu video h/w bug */
}
if (*num_of_buffers == max_num_buffers) {
@@ -573,6 +584,20 @@
goto bail_out_add;
}
put_pmem_file(file);
+ flags = (buffer == BUFFER_TYPE_INPUT)
+ ? MSM_SUBSYSTEM_MAP_IOVA :
+ MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+ mapped_buffer = msm_subsystem_map_buffer(phys_addr,
+ length, flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
+ if (IS_ERR(mapped_buffer)) {
+ pr_err("buffer map failed");
+ goto bail_out_add;
+ }
+ buf_addr_table[*num_of_buffers].client_data = (void *)
+ mapped_buffer;
+ buf_addr_table[*num_of_buffers].dev_addr =
+ mapped_buffer->iova[0];
} else {
buff_ion_handle = ion_import_fd(
client_ctx->user_ion_client, pmem_fd);
@@ -586,7 +611,7 @@
&ionflag)) {
ERR("%s():ION flags fail\n",
__func__);
- goto ion_error;
+ goto bail_out_add;
}
*kernel_vaddr = (unsigned long)
ion_map_kernel(
@@ -597,32 +622,28 @@
ERR("%s():ION virtual addr fail\n",
__func__);
*kernel_vaddr = (unsigned long)NULL;
- goto ion_error;
+ goto ion_free_error;
}
- if (ion_phys(client_ctx->user_ion_client,
+ ret = ion_map_iommu(client_ctx->user_ion_client,
buff_ion_handle,
- &phys_addr, &ion_len)) {
- ERR("%s():ION physical addr fail\n",
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_8K,
+ length,
+ (unsigned long *) &iova,
+ (unsigned long *) &buffer_size,
+ UNCACHED, ION_IOMMU_UNMAP_DELAYED);
+ if (ret) {
+ ERR("%s():ION iommu map fail\n",
__func__);
- goto ion_error;
+ goto ion_map_error;
}
- len = (unsigned long) ion_len;
+ phys_addr = iova;
+ buf_addr_table[*num_of_buffers].client_data = NULL;
+ buf_addr_table[*num_of_buffers].dev_addr = iova;
}
phys_addr += buffer_addr_offset;
(*kernel_vaddr) += buffer_addr_offset;
- flags = (buffer == BUFFER_TYPE_INPUT) ? MSM_SUBSYSTEM_MAP_IOVA :
- MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K;
- mapped_buffer = msm_subsystem_map_buffer(phys_addr, length,
- flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(mapped_buffer)) {
- pr_err("buffer map failed");
- goto ion_error;
- }
- buf_addr_table[*num_of_buffers].client_data = (void *)
- mapped_buffer;
- buf_addr_table[*num_of_buffers].dev_addr =
- mapped_buffer->iova[0];
buf_addr_table[*num_of_buffers].user_vaddr = user_vaddr;
buf_addr_table[*num_of_buffers].kernel_vaddr = *kernel_vaddr;
buf_addr_table[*num_of_buffers].pmem_fd = pmem_fd;
@@ -640,9 +661,10 @@
}
mutex_unlock(&client_ctx->enrty_queue_lock);
return true;
-ion_error:
+ion_map_error:
if (*kernel_vaddr && buff_ion_handle)
ion_unmap_kernel(client_ctx->user_ion_client, buff_ion_handle);
+ion_free_error:
if (!IS_ERR_OR_NULL(buff_ion_handle))
ion_free(client_ctx->user_ion_client, buff_ion_handle);
bail_out_add:
@@ -685,12 +707,19 @@
__func__, client_ctx, user_vaddr);
goto bail_out_del;
}
- msm_subsystem_unmap_buffer(
- (struct msm_mapped_buffer *)buf_addr_table[i].client_data);
+ if (buf_addr_table[i].client_data) {
+ msm_subsystem_unmap_buffer(
+ (struct msm_mapped_buffer *)buf_addr_table[i].client_data);
+ buf_addr_table[i].client_data = NULL;
+ }
*kernel_vaddr = buf_addr_table[i].kernel_vaddr;
if (buf_addr_table[i].buff_ion_handle) {
ion_unmap_kernel(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ buf_addr_table[i].buff_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
buf_addr_table[i].buff_ion_handle = NULL;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index ebc30fd..b5f643f 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -36,9 +36,10 @@
u32 memtype, i = 0, flags = 0;
struct vcd_msm_map_buffer *map_buffer = NULL;
struct msm_mapped_buffer *mapped_buffer = NULL;
- int rc = 0;
- ion_phys_addr_t phyaddr = 0;
- size_t len = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+ int ret = 0;
+ unsigned long ionflag = 0;
if (!kernel_vaddr || !phy_addr || !cctxt) {
pr_err("\n%s: Invalid parameters", __func__);
@@ -66,6 +67,22 @@
pr_err("%s() acm alloc failed", __func__);
goto free_map_table;
}
+ flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
+ map_buffer->mapped_buffer =
+ msm_subsystem_map_buffer((unsigned long)map_buffer->phy_addr,
+ sz, flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
+ if (IS_ERR(map_buffer->mapped_buffer)) {
+ pr_err(" %s() buffer map failed", __func__);
+ goto free_acm_alloc;
+ }
+ mapped_buffer = map_buffer->mapped_buffer;
+ if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
+ pr_err("%s() map buffers failed", __func__);
+ goto free_map_buffers;
+ }
+ *phy_addr = (u8 *) mapped_buffer->iova[0];
+ *kernel_vaddr = (u8 *) mapped_buffer->vaddr;
} else {
map_buffer->alloc_handle = ion_alloc(
cctxt->vcd_ion_client, sz, SZ_4K,
@@ -74,48 +91,58 @@
pr_err("%s() ION alloc failed", __func__);
goto bailout;
}
- rc = ion_phys(cctxt->vcd_ion_client,
- map_buffer->alloc_handle, &phyaddr, &len);
- if (rc) {
- pr_err("%s() : ION client physical fail\n",
- __func__);
- goto free_acm_alloc;
+ if (ion_handle_get_flags(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ &ionflag)) {
+ pr_err("%s() ION get flag failed", __func__);
+ goto bailout;
}
- map_buffer->phy_addr = phyaddr;
+ *kernel_vaddr = (u8 *) ion_map_kernel(
+ cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ ionflag);
+ if (!(*kernel_vaddr)) {
+ pr_err("%s() ION map failed", __func__);
+ goto ion_free_bailout;
+ }
+ ret = ion_map_iommu(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_4K,
+ 0,
+ (unsigned long *)&iova,
+ (unsigned long *)&buffer_size,
+ UNCACHED, 0);
+ if (ret) {
+ pr_err("%s() ION iommu map failed", __func__);
+ goto ion_map_bailout;
+ }
+ map_buffer->phy_addr = iova;
if (!map_buffer->phy_addr) {
pr_err("%s() acm alloc failed", __func__);
goto free_map_table;
}
-
+ *phy_addr = (u8 *)iova;
+ mapped_buffer = NULL;
+ map_buffer->mapped_buffer = NULL;
}
- flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
- map_buffer->mapped_buffer =
- msm_subsystem_map_buffer((unsigned long)map_buffer->phy_addr,
- sz, flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(map_buffer->mapped_buffer)) {
- pr_err(" %s() buffer map failed", __func__);
- goto free_acm_alloc;
- }
- mapped_buffer = map_buffer->mapped_buffer;
- if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
- pr_err("%s() map buffers failed", __func__);
- goto free_map_buffers;
- }
- *phy_addr = (u8 *) mapped_buffer->iova[0];
- *kernel_vaddr = (u8 *) mapped_buffer->vaddr;
return 0;
free_map_buffers:
- msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
+ if (map_buffer->mapped_buffer)
+ msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
free_acm_alloc:
if (!cctxt->vcd_enable_ion) {
free_contiguous_memory_by_paddr(
(unsigned long)map_buffer->phy_addr);
- } else {
- ion_free(cctxt->vcd_ion_client, map_buffer->alloc_handle);
}
+ return -ENOMEM;
+ion_map_bailout:
+ ion_unmap_kernel(cctxt->vcd_ion_client, map_buffer->alloc_handle);
+ion_free_bailout:
+ ion_free(cctxt->vcd_ion_client, map_buffer->alloc_handle);
free_map_table:
map_buffer->in_use = 0;
bailout:
@@ -145,9 +172,16 @@
pr_err("%s() Entry not found", __func__);
goto bailout;
}
- msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
+ if (map_buffer->mapped_buffer)
+ msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
if (cctxt->vcd_enable_ion) {
if (map_buffer->alloc_handle) {
+ ion_unmap_kernel(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle);
+ ion_unmap_iommu(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(cctxt->vcd_ion_client,
map_buffer->alloc_handle);
}
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 5d3a6a1..15737d6 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -679,7 +679,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x1520
+#define LOG_1 0x15A7
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 31af535..8a3b999 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -100,6 +100,10 @@
* VBAT_THERM goes below 35% of VREF_THERM, if low the
* battery will be considered hot when VBAT_THERM goes
* below 25% of VREF_THERM. Hardware defaults to low.
+ * @rconn_mohm: resistance in milliOhm from the vbat sense to ground
+ * with the battery terminals shorted. This indicates
+ * resistance of the pads, connectors, battery terminals
+ * and rsense.
*/
struct pm8921_charger_platform_data {
struct pm8xxx_charger_core_data charger_cdata;
@@ -131,6 +135,7 @@
int thermal_levels;
enum pm8921_chg_cold_thr cold_thr;
enum pm8921_chg_hot_thr hot_thr;
+ int rconn_mohm;
};
enum pm8921_charger_source {
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index d2690c3..3b6d5b7 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -51,14 +51,14 @@
# Make sure we're at the tag that matches the Makefile.
# If not place the hash of the tag as well for
# v2.6.30-rc5-g314aef
- if [ "x$atag" -ne "x$VERSION" ]; then
+ if [ "x$atag" != "x$VERSION" ]; then
# If only the short version is requested,
# don't bother running further git commands
if $short; then
echo "+"
return
fi
- printf '%s%s' -g "`git show-ref -s --abbrev $atag 2>/dev/null`"
+ printf '%s%s' -g "`git show-ref -s --abbrev --tags $atag 2>/dev/null`"
fi
else
@@ -74,7 +74,7 @@
if atag="`git describe 2>/dev/null`"; then
tag="`git describe --abbrev=0 2>/dev/null`"
commit="`echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'`"
- printf '%s%s%s' -g "`git show-ref -s --abbrev $tag 2>/dev/null`" $commit
+ printf '%s%s%s' -g "`git show-ref -s --abbrev --tags $tag 2>/dev/null`" $commit
# If we don't have a tag at all we print -g{commitish}.
else
printf '%s%s' -g $head