Merge "msm: 8064: Disable write-protect for MTP/LIQUID" into msm-3.0
diff --git a/arch/arm/configs/msm7627a-perf_defconfig b/arch/arm/configs/msm7627a-perf_defconfig
index 44986a4..7bca2ec 100644
--- a/arch/arm/configs/msm7627a-perf_defconfig
+++ b/arch/arm/configs/msm7627a-perf_defconfig
@@ -1,3 +1,4 @@
+CONFIG_CFG80211=m
CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="$(KERNEL_LOCAL_VERSION)-perf"
# CONFIG_SWAP is not set
@@ -169,7 +170,6 @@
CONFIG_BT_HCIUART_IBS=y
CONFIG_BT_MSM_SLEEP=y
CONFIG_MSM_BT_POWER=y
-CONFIG_CFG80211=y
# CONFIG_CFG80211_WEXT is not set
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
diff --git a/arch/arm/configs/msm7627a_defconfig b/arch/arm/configs/msm7627a_defconfig
index 6ba9790..ce9e6e5 100644
--- a/arch/arm/configs/msm7627a_defconfig
+++ b/arch/arm/configs/msm7627a_defconfig
@@ -1,3 +1,4 @@
+CONFIG_CFG80211=m
CONFIG_EXPERIMENTAL=y
CONFIG_LOCALVERSION="$(KERNEL_LOCAL_VERSION)"
# CONFIG_SWAP is not set
@@ -169,7 +170,6 @@
CONFIG_BT_HCIUART_IBS=y
CONFIG_BT_MSM_SLEEP=y
CONFIG_MSM_BT_POWER=y
-CONFIG_CFG80211=y
# CONFIG_CFG80211_WEXT is not set
CONFIG_RFKILL=y
CONFIG_GENLOCK=y
diff --git a/arch/arm/mach-msm/board-8064-camera.c b/arch/arm/mach-msm/board-8064-camera.c
index 70d76fb..c714bc8 100644
--- a/arch/arm/mach-msm/board-8064-camera.c
+++ b/arch/arm/mach-msm/board-8064-camera.c
@@ -440,12 +440,18 @@
.flash_src = &msm_flash_src
};
+static struct msm_camera_csi_lane_params imx074_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0xF,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = {
.mount_angle = 90,
.cam_vreg = apq_8064_back_cam_vreg,
.num_vreg = ARRAY_SIZE(apq_8064_back_cam_vreg),
.gpio_conf = &apq8064_back_cam_gpio_conf,
.i2c_conf = &apq8064_back_cam_i2c_conf,
+ .csi_lane_params = &imx074_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = {
@@ -469,12 +475,18 @@
.flash_type = MSM_CAMERA_FLASH_NONE
};
+static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0x1,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = {
.mount_angle = 90,
.cam_vreg = apq_8064_mt9m114_vreg,
.num_vreg = ARRAY_SIZE(apq_8064_mt9m114_vreg),
.gpio_conf = &apq8064_front_cam_gpio_conf,
.i2c_conf = &apq8064_front_cam_i2c_conf,
+ .csi_lane_params = &mt9m114_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = {
@@ -490,12 +502,18 @@
.flash_type = MSM_CAMERA_FLASH_NONE,
};
+static struct msm_camera_csi_lane_params ov2720_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0x3,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = {
.mount_angle = 0,
.cam_vreg = apq_8064_front_cam_vreg,
.num_vreg = ARRAY_SIZE(apq_8064_front_cam_vreg),
.gpio_conf = &apq8064_front_cam_gpio_conf,
.i2c_conf = &apq8064_front_cam_i2c_conf,
+ .csi_lane_params = &ov2720_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = {
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 2e92bb0..f66c8a7 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -1009,6 +1009,11 @@
.dev = {.platform_data = &qcom_wcnss_pdata},
};
+static struct platform_device msm_device_iris_fm __devinitdata = {
+ .name = "iris_fm",
+ .id = -1,
+};
+
#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE) || \
defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
@@ -1640,6 +1645,7 @@
&apq8064_device_hsusb_host,
&android_usb_device,
&msm_device_wcnss_wlan,
+ &msm_device_iris_fm,
#ifdef CONFIG_ANDROID_PMEM
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
&android_pmem_device,
diff --git a/arch/arm/mach-msm/board-8930-camera.c b/arch/arm/mach-msm/board-8930-camera.c
index 1d743d8..5f4e62c 100644
--- a/arch/arm/mach-msm/board-8930-camera.c
+++ b/arch/arm/mach-msm/board-8930-camera.c
@@ -425,11 +425,17 @@
#endif
};
+static struct msm_camera_csi_lane_params imx074_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0xF,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = {
.mount_angle = 90,
.cam_vreg = msm_8930_back_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_back_cam_vreg),
.gpio_conf = &msm_8930_back_cam_gpio_conf,
+ .csi_lane_params = &imx074_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = {
@@ -454,11 +460,17 @@
.flash_type = MSM_CAMERA_FLASH_NONE
};
+static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0x1,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = {
.mount_angle = 90,
.cam_vreg = msm_8930_mt9m114_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_mt9m114_vreg),
.gpio_conf = &msm_8930_front_cam_gpio_conf,
+ .csi_lane_params = &mt9m114_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = {
@@ -474,11 +486,17 @@
.flash_type = MSM_CAMERA_FLASH_NONE,
};
+static struct msm_camera_csi_lane_params ov2720_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0x3,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = {
.mount_angle = 0,
.cam_vreg = msm_8930_front_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_front_cam_vreg),
.gpio_conf = &msm_8930_front_cam_gpio_conf,
+ .csi_lane_params = &ov2720_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = {
@@ -501,11 +519,17 @@
.flash_type = MSM_CAMERA_FLASH_NONE,
};
+static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0xF,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = {
.mount_angle = 0,
.cam_vreg = msm_8930_s5k3l1yx_vreg,
.num_vreg = ARRAY_SIZE(msm_8930_s5k3l1yx_vreg),
.gpio_conf = &msm_8930_back_cam_gpio_conf,
+ .csi_lane_params = &s5k3l1yx_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = {
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index 46c60ff..6a2a6dc 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -1796,7 +1796,7 @@
&msm_etb_device,
&msm_tpiu_device,
&msm_funnel_device,
- &msm_ptm_device,
+ &msm_etm_device,
#endif
&msm_device_dspcrashd_8960,
&msm8960_device_watchdog,
diff --git a/arch/arm/mach-msm/board-8960-camera.c b/arch/arm/mach-msm/board-8960-camera.c
index 3a697bf..3392f46 100644
--- a/arch/arm/mach-msm/board-8960-camera.c
+++ b/arch/arm/mach-msm/board-8960-camera.c
@@ -496,11 +496,17 @@
#endif
};
+static struct msm_camera_csi_lane_params imx074_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0xF,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = {
.mount_angle = 90,
.cam_vreg = msm_8960_back_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8960_back_cam_vreg),
.gpio_conf = &msm_8960_back_cam_gpio_conf,
+ .csi_lane_params = &imx074_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = {
@@ -525,11 +531,17 @@
.flash_type = MSM_CAMERA_FLASH_NONE
};
+static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0x1,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = {
.mount_angle = 90,
.cam_vreg = msm_8960_mt9m114_vreg,
.num_vreg = ARRAY_SIZE(msm_8960_mt9m114_vreg),
.gpio_conf = &msm_8960_front_cam_gpio_conf,
+ .csi_lane_params = &mt9m114_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = {
@@ -545,11 +557,17 @@
.flash_type = MSM_CAMERA_FLASH_NONE,
};
+static struct msm_camera_csi_lane_params ov2720_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0x3,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = {
.mount_angle = 0,
.cam_vreg = msm_8960_front_cam_vreg,
.num_vreg = ARRAY_SIZE(msm_8960_front_cam_vreg),
.gpio_conf = &msm_8960_front_cam_gpio_conf,
+ .csi_lane_params = &ov2720_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = {
@@ -572,11 +590,17 @@
.flash_type = MSM_CAMERA_FLASH_NONE,
};
+static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = {
+ .csi_lane_assign = 0xE4,
+ .csi_lane_mask = 0xF,
+};
+
static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = {
.mount_angle = 0,
.cam_vreg = msm_8960_s5k3l1yx_vreg,
.num_vreg = ARRAY_SIZE(msm_8960_s5k3l1yx_vreg),
.gpio_conf = &msm_8960_back_cam_gpio_conf,
+ .csi_lane_params = &s5k3l1yx_csi_lane_params,
};
static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = {
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 4e18f89..3b84875 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -432,6 +432,7 @@
.warm_bat_voltage = 4100,
.thermal_mitigation = pm8921_therm_mitigation,
.thermal_levels = ARRAY_SIZE(pm8921_therm_mitigation),
+ .rconn_mohm = 18,
};
static struct pm8xxx_misc_platform_data pm8xxx_misc_pdata = {
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index de42371..e2ba303 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -2183,7 +2183,7 @@
&msm_etb_device,
&msm_tpiu_device,
&msm_funnel_device,
- &msm_ptm_device,
+ &msm_etm_device,
#endif
&msm_device_dspcrashd_8960,
&msm8960_device_watchdog,
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index ec2be96..c91f2b6 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -587,7 +587,8 @@
/* Concurrency 6 */
(DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
- 0, 0, 0, 0,
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ 0, 0, 0,
/* Concurrency 7 */
(DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 98a32c6..69b92af 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -4445,6 +4445,7 @@
static DEFINE_CLK_VOTER(dfab_bam_dmux_clk, &dfab_clk.c);
static DEFINE_CLK_VOTER(dfab_scm_clk, &dfab_clk.c);
static DEFINE_CLK_VOTER(dfab_qseecom_clk, &dfab_clk.c);
+static DEFINE_CLK_VOTER(dfab_tzcom_clk, &dfab_clk.c);
static DEFINE_CLK_VOTER(ebi1_msmbus_clk, &ebi1_clk.c);
static DEFINE_CLK_VOTER(ebi1_adm_clk, &ebi1_clk.c);
@@ -5103,6 +5104,7 @@
CLK_LOOKUP("bus_clk", dfab_bam_dmux_clk.c, "BAM_RMNT"),
CLK_LOOKUP("bus_clk", dfab_scm_clk.c, "scm"),
CLK_LOOKUP("bus_clk", dfab_qseecom_clk.c, "qseecom"),
+ CLK_LOOKUP("bus_clk", dfab_tzcom_clk.c, "tzcom"),
CLK_LOOKUP("alt_core_clk", usb_hsic_xcvr_fs_clk.c, "msm_hsic_host"),
CLK_LOOKUP("phy_clk", usb_hsic_hsic_clk.c, "msm_hsic_host"),
@@ -5411,6 +5413,7 @@
CLK_LOOKUP("bus_clk", dfab_bam_dmux_clk.c, "BAM_RMNT"),
CLK_LOOKUP("bus_clk", dfab_scm_clk.c, "scm"),
CLK_LOOKUP("bus_clk", dfab_qseecom_clk.c, "qseecom"),
+ CLK_LOOKUP("bus_clk", dfab_tzcom_clk.c, "tzcom"),
CLK_LOOKUP("mem_clk", ebi1_adm_clk.c, "msm_dmov"),
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index c75a4e3..0ad25c0 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -116,6 +116,7 @@
MSM_RPM_MAP(8930, USB_OTG_SWITCH, USB_OTG_SWITCH, 1),
MSM_RPM_MAP(8930, HDMI_SWITCH, HDMI_SWITCH, 1),
MSM_RPM_MAP(8930, QDSS_CLK, QDSS_CLK, 1),
+ MSM_RPM_MAP(8930, VOLTAGE_CORNER, VOLTAGE_CORNER, 1),
},
.target_status = {
MSM_RPM_STATUS_ID_MAP(8930, VERSION_MAJOR),
@@ -219,6 +220,7 @@
MSM_RPM_STATUS_ID_MAP(8930, USB_OTG_SWITCH),
MSM_RPM_STATUS_ID_MAP(8930, HDMI_SWITCH),
MSM_RPM_STATUS_ID_MAP(8930, QDSS_CLK),
+ MSM_RPM_STATUS_ID_MAP(8930, VOLTAGE_CORNER),
},
.target_ctrl_id = {
MSM_RPM_CTRL_MAP(8930, VERSION_MAJOR),
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 4c02215..dbf26d9 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3127,7 +3127,7 @@
#define MSM_ETB_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x1000)
#define MSM_TPIU_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x3000)
#define MSM_FUNNEL_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x4000)
-#define MSM_PTM_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x1C000)
+#define MSM_ETM_PHYS_BASE (MSM_QDSS_PHYS_BASE + 0x1C000)
static struct resource msm_etb_resources[] = {
{
@@ -3174,19 +3174,19 @@
.resource = msm_funnel_resources,
};
-static struct resource msm_ptm_resources[] = {
+static struct resource msm_etm_resources[] = {
{
- .start = MSM_PTM_PHYS_BASE,
- .end = MSM_PTM_PHYS_BASE + (SZ_4K * 2) - 1,
+ .start = MSM_ETM_PHYS_BASE,
+ .end = MSM_ETM_PHYS_BASE + (SZ_4K * 2) - 1,
.flags = IORESOURCE_MEM,
},
};
-struct platform_device msm_ptm_device = {
- .name = "msm_ptm",
+struct platform_device msm_etm_device = {
+ .name = "msm_etm",
.id = 0,
- .num_resources = ARRAY_SIZE(msm_ptm_resources),
- .resource = msm_ptm_resources,
+ .num_resources = ARRAY_SIZE(msm_etm_resources),
+ .resource = msm_etm_resources,
};
#endif
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index beb0c55..b879d8b 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -645,16 +645,18 @@
/* Command sequence for simple WFI */
static uint8_t spm_wfi_cmd_sequence[] __initdata = {
- 0x00, 0x40, 0x40, 0x03,
- 0x00, 0x40, 0x40, 0x0f,
+ 0x04, 0x03, 0x04, 0x0f,
};
/* Command sequence for GDFS, this won't send any interrupt to the modem */
static uint8_t spm_pc_without_modem[] __initdata = {
0x20, 0x00, 0x30, 0x10,
- 0x40, 0x40, 0x03, 0x10,
- 0x00, 0x30, 0x2E, 0x40,
- 0x40, 0x0f,
+ 0x03, 0x1e, 0x0e, 0x3e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e,
+ 0x2E, 0x0f,
};
static struct msm_spm_seq_entry msm_spm_seq_list[] __initdata = {
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index ab55a25..0a14db0 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -316,7 +316,7 @@
extern struct platform_device msm_etb_device;
extern struct platform_device msm_tpiu_device;
extern struct platform_device msm_funnel_device;
-extern struct platform_device msm_ptm_device;
+extern struct platform_device msm_etm_device;
#endif
extern struct platform_device msm_bus_8064_apps_fabric;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 7fa4a07..7e17120 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -191,6 +191,11 @@
uint32_t delay;
};
+struct msm_camera_csi_lane_params {
+ uint8_t csi_lane_assign;
+ uint8_t csi_lane_mask;
+};
+
struct msm_camera_gpio_conf {
void *cam_gpiomux_conf_tbl;
uint8_t cam_gpiomux_conf_tbl_size;
@@ -227,6 +232,7 @@
int32_t (*ext_power_ctrl) (int enable);
struct msm_camera_gpio_conf *gpio_conf;
struct msm_camera_i2c_conf *i2c_conf;
+ struct msm_camera_csi_lane_params *csi_lane_params;
};
struct msm_actuator_info {
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index ef7be45..2aae5dd 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -192,6 +192,7 @@
struct msm_camera_csiphy_params {
uint8_t lane_cnt;
uint8_t settle_cnt;
+ uint8_t lane_mask;
};
struct msm_camera_csi2_params {
diff --git a/arch/arm/mach-msm/include/mach/rpm-8930.h b/arch/arm/mach-msm/include/mach/rpm-8930.h
index 04218b2..3bcd42e 100644
--- a/arch/arm/mach-msm/include/mach/rpm-8930.h
+++ b/arch/arm/mach-msm/include/mach/rpm-8930.h
@@ -100,7 +100,8 @@
MSM_RPM_8930_SEL_CXO_BUFFERS = 81,
MSM_RPM_8930_SEL_USB_OTG_SWITCH = 82,
MSM_RPM_8930_SEL_HDMI_SWITCH = 83,
- MSM_RPM_8930_SEL_LAST = MSM_RPM_8930_SEL_HDMI_SWITCH,
+ MSM_RPM_8930_SEL_VOLTAGE_CORNER = 87,
+ MSM_RPM_8930_SEL_LAST = MSM_RPM_8930_SEL_VOLTAGE_CORNER,
};
/* RPM resource (4 byte) word ID enum */
@@ -239,7 +240,8 @@
MSM_RPM_8930_ID_USB_OTG_SWITCH = 192,
MSM_RPM_8930_ID_HDMI_SWITCH = 193,
MSM_RPM_8930_ID_QDSS_CLK = 194,
- MSM_RPM_8930_ID_LAST = MSM_RPM_8930_ID_QDSS_CLK,
+ MSM_RPM_8930_ID_VOLTAGE_CORNER = 195,
+ MSM_RPM_8930_ID_LAST = MSM_RPM_8930_ID_VOLTAGE_CORNER,
};
/* RPM status ID enum */
@@ -349,7 +351,8 @@
MSM_RPM_8930_STATUS_ID_USB_OTG_SWITCH = 106,
MSM_RPM_8930_STATUS_ID_HDMI_SWITCH = 107,
MSM_RPM_8930_STATUS_ID_QDSS_CLK = 108,
- MSM_RPM_8930_STATUS_ID_LAST = MSM_RPM_8930_STATUS_ID_QDSS_CLK,
+ MSM_RPM_8930_STATUS_ID_VOLTAGE_CORNER = 109,
+ MSM_RPM_8930_STATUS_ID_LAST = MSM_RPM_8930_STATUS_ID_VOLTAGE_CORNER,
};
#endif /* __ARCH_ARM_MACH_MSM_RPM_8930_H */
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator-8930.h b/arch/arm/mach-msm/include/mach/rpm-regulator-8930.h
index 9e654ed..684f9d3 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator-8930.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator-8930.h
@@ -121,7 +121,8 @@
RPM_VREG_ID_PM8038_S6,
RPM_VREG_ID_PM8038_LVS1,
RPM_VREG_ID_PM8038_LVS2,
- RPM_VREG_ID_PM8038_MAX_REAL = RPM_VREG_ID_PM8038_LVS2,
+ RPM_VREG_ID_PM8038_VDD_DIG_CORNER,
+ RPM_VREG_ID_PM8038_MAX_REAL = RPM_VREG_ID_PM8038_VDD_DIG_CORNER,
/* The following are IDs for regulator devices to enable pin control. */
RPM_VREG_ID_PM8038_L2_PC,
diff --git a/arch/arm/mach-msm/include/mach/rpm-regulator.h b/arch/arm/mach-msm/include/mach/rpm-regulator.h
index f8f4c87..d56f1b6 100644
--- a/arch/arm/mach-msm/include/mach/rpm-regulator.h
+++ b/arch/arm/mach-msm/include/mach/rpm-regulator.h
@@ -68,6 +68,19 @@
};
/**
+ * enum rpm_vreg_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage and rpm_vreg_set_voltage calls
+ * for corner type regulators as if they had units of uV.
+ */
+enum rpm_vreg_voltage_corner {
+ RPM_VREG_CORNER_NONE = 1,
+ RPM_VREG_CORNER_LOW,
+ RPM_VREG_CORNER_NOMINAL,
+ RPM_VREG_CORNER_HIGH,
+};
+
+/**
* struct rpm_regulator_init_data - RPM regulator initialization data
* @init_data: regulator constraints
* @id: regulator id; from enum rpm_vreg_id
diff --git a/arch/arm/mach-msm/include/mach/rpm.h b/arch/arm/mach-msm/include/mach/rpm.h
index bcb1240..95a2ccf 100644
--- a/arch/arm/mach-msm/include/mach/rpm.h
+++ b/arch/arm/mach-msm/include/mach/rpm.h
@@ -451,6 +451,7 @@
MSM_RPM_ID_PM8038_CLK2_1,
MSM_RPM_ID_PM8038_LVS1,
MSM_RPM_ID_PM8038_LVS2,
+ MSM_RPM_ID_VOLTAGE_CORNER,
/* 8064 specific */
MSM_RPM_ID_PM8821_S1_0,
@@ -815,6 +816,7 @@
MSM_RPM_STATUS_ID_PM8038_CLK2_1,
MSM_RPM_STATUS_ID_PM8038_LVS1,
MSM_RPM_STATUS_ID_PM8038_LVS2,
+ MSM_RPM_STATUS_ID_VOLTAGE_CORNER,
/* 8064 specific */
MSM_RPM_STATUS_ID_PM8821_S1_0,
diff --git a/arch/arm/mach-msm/msm_dsps.c b/arch/arm/mach-msm/msm_dsps.c
index efb2bf6..057665b 100644
--- a/arch/arm/mach-msm/msm_dsps.c
+++ b/arch/arm/mach-msm/msm_dsps.c
@@ -39,7 +39,7 @@
#include <mach/subsystem_restart.h>
#include <mach/subsystem_notif.h>
-#include <timer.h>
+#include "timer.h"
#define DRV_NAME "msm_dsps"
#define DRV_VERSION "3.02"
diff --git a/arch/arm/mach-msm/qdsp5/Makefile b/arch/arm/mach-msm/qdsp5/Makefile
index a4a43ed..2ce0031 100644
--- a/arch/arm/mach-msm/qdsp5/Makefile
+++ b/arch/arm/mach-msm/qdsp5/Makefile
@@ -17,3 +17,4 @@
obj-y += snd.o snd_adie.o
obj-$(CONFIG_ARCH_MSM7X27A) += audio_fm.o
obj-$(CONFIG_ARCH_MSM7X27A) += audio_mvs.o
+obj-$(CONFIG_ARCH_MSM7X27A) += audio_lpa.o
diff --git a/arch/arm/mach-msm/qdsp5/audio_lpa.c b/arch/arm/mach-msm/qdsp5/audio_lpa.c
new file mode 100644
index 0000000..dab53dc
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp5/audio_lpa.c
@@ -0,0 +1,1485 @@
+
+/* audio_lpa.c - low power audio driver
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * Based on the PCM decoder driver in arch/arm/mach-msm/qdsp5/audio_pcm.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/earlysuspend.h>
+#include <linux/list.h>
+#include <linux/android_pmem.h>
+#include <linux/slab.h>
+#include <linux/msm_audio.h>
+
+
+#include <mach/msm_adsp.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+#include <mach/msm_subsystem_map.h>
+#include <mach/qdsp5/qdsp5audppcmdi.h>
+#include <mach/qdsp5/qdsp5audppmsg.h>
+#include <mach/qdsp5/qdsp5audplaycmdi.h>
+#include <mach/qdsp5/qdsp5audplaymsg.h>
+#include <mach/qdsp5/qdsp5rmtcmdi.h>
+#include <mach/debug_mm.h>
+#include <linux/memory_alloc.h>
+#include <mach/msm_memtypes.h>
+
+#include "audmgr.h"
+
+/* for queue ids - should be relative to module number*/
+#include "adsp.h"
+
+#define ADRV_STATUS_AIO_INTF 0x00000001
+#define ADRV_STATUS_OBUF_GIVEN 0x00000002
+#define ADRV_STATUS_IBUF_GIVEN 0x00000004
+#define ADRV_STATUS_FSYNC 0x00000008
+
+#define MSM_MAX_VOLUME 0x2000
+/* 17 added to avoid more deviation */
+#define MSM_VOLUME_STEP (MSM_MAX_VOLUME+17)
+#define MSM_VOLUME_FACTOR (10000)
+
+/* Size must be power of 2 */
+#define MAX_BUF 2
+#define BUFSZ (524288)
+
+#define AUDDEC_DEC_PCM 0
+
+/* Decoder status received from AUDPPTASK */
+#define AUDPP_DEC_STATUS_SLEEP 0
+#define AUDPP_DEC_STATUS_INIT 1
+#define AUDPP_DEC_STATUS_CFG 2
+#define AUDPP_DEC_STATUS_PLAY 3
+
+#define AUDPCM_EVENT_NUM 10 /* Default number of pre-allocated event packets */
+
+#define __CONTAINS(r, v, l) ({ \
+ typeof(r) __r = r; \
+ typeof(v) __v = v; \
+ typeof(v) __e = __v + l; \
+ int res = ((__v >= __r->vaddr) && \
+ (__e <= __r->vaddr + __r->len)); \
+ res; \
+})
+
+#define CONTAINS(r1, r2) ({ \
+ typeof(r2) __r2 = r2; \
+ __CONTAINS(r1, __r2->vaddr, __r2->len); \
+})
+
+#define IN_RANGE(r, v) ({ \
+ typeof(r) __r = r; \
+ typeof(v) __vv = v; \
+ int res = ((__vv >= __r->vaddr) && \
+ (__vv < (__r->vaddr + __r->len))); \
+ res; \
+})
+
+#define OVERLAPS(r1, r2) ({ \
+ typeof(r1) __r1 = r1; \
+ typeof(r2) __r2 = r2; \
+ typeof(__r2->vaddr) __v = __r2->vaddr; \
+ typeof(__v) __e = __v + __r2->len - 1; \
+ int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \
+ res; \
+})
+
+struct audio;
+
+struct buffer {
+ void *data;
+ unsigned size;
+ unsigned used; /* Input usage actual DSP produced PCM size */
+ unsigned addr;
+};
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+struct audpcm_suspend_ctl {
+struct early_suspend node;
+struct audio *audio;
+};
+#endif
+
+struct audpcm_event {
+ struct list_head list;
+ int event_type;
+ union msm_audio_event_payload payload;
+};
+
+struct audlpa_pmem_region {
+ struct list_head list;
+ struct file *file;
+ int fd;
+ void *vaddr;
+ unsigned long paddr;
+ unsigned long kvaddr;
+ unsigned long len;
+ unsigned ref_cnt;
+};
+
+struct audpcm_buffer_node {
+ struct list_head list;
+ struct msm_audio_aio_buf buf;
+ unsigned long paddr;
+};
+
+struct audio {
+ struct buffer out[2];
+
+ spinlock_t dsp_lock;
+
+ uint8_t out_head;
+ uint8_t out_tail;
+ uint8_t out_needed; /* number of buffers the dsp is waiting for */
+ struct list_head out_queue; /* queue to retain output buffers */
+ atomic_t out_bytes;
+
+ struct mutex lock;
+ struct mutex write_lock;
+ wait_queue_head_t write_wait;
+
+ struct msm_adsp_module *audplay;
+
+ /* configuration to use on next enable */
+ uint32_t out_sample_rate;
+ uint32_t out_channel_mode;
+ uint32_t out_bits; /* bits per sample */
+
+ struct audmgr audmgr;
+
+ /* data allocated for various buffers */
+ char *data;
+ int32_t phys;
+ struct msm_mapped_buffer *map_v_write;
+
+ uint32_t drv_status;
+ int wflush; /* Write flush */
+ int opened;
+ int enabled;
+ int running;
+ int stopped; /* set when stopped, cleared on flush */
+ int teos; /* valid only if tunnel mode & no data left for decoder */
+ int rmt_resource_released;
+ enum msm_aud_decoder_state dec_state; /* Represents decoder state */
+ int reserved; /* A byte is being reserved */
+ char rsv_byte; /* Handle odd length user data */
+
+ const char *module_name;
+ unsigned queue_id;
+
+ unsigned long volume;
+
+ uint16_t dec_id;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct audpcm_suspend_ctl suspend_ctl;
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
+ wait_queue_head_t wait;
+ struct list_head free_event_queue;
+ struct list_head event_queue;
+ wait_queue_head_t event_wait;
+ spinlock_t event_queue_lock;
+ struct mutex get_event_lock;
+ int event_abort;
+
+ struct list_head pmem_region_queue;
+ int buffer_count;
+ int buffer_size;
+};
+
+static int auddec_dsp_config(struct audio *audio, int enable);
+static void audpp_cmd_cfg_adec_params(struct audio *audio);
+static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
+static void audpcm_post_event(struct audio *audio, int type,
+ union msm_audio_event_payload payload);
+static unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr,
+ unsigned long len, int ref_up);
+static void audpcm_async_send_data(struct audio *audio,
+ unsigned needed);
+
+
+static int rmt_put_resource(struct audio *audio)
+{
+ struct aud_codec_config_cmd cmd;
+ unsigned short client_idx;
+
+ cmd.cmd_id = RM_CMD_AUD_CODEC_CFG;
+ cmd.client_id = RM_AUD_CLIENT_ID;
+ cmd.task_id = audio->dec_id;
+ cmd.enable = RMT_DISABLE;
+ cmd.dec_type = AUDDEC_DEC_PCM;
+ client_idx = ((cmd.client_id << 8) | cmd.task_id);
+
+ return put_adsp_resource(client_idx, &cmd, sizeof(cmd));
+}
+
+static int rmt_get_resource(struct audio *audio)
+{
+ struct aud_codec_config_cmd cmd;
+ unsigned short client_idx;
+
+ cmd.cmd_id = RM_CMD_AUD_CODEC_CFG;
+ cmd.client_id = RM_AUD_CLIENT_ID;
+ cmd.task_id = audio->dec_id;
+ cmd.enable = RMT_ENABLE;
+ cmd.dec_type = AUDDEC_DEC_PCM;
+ client_idx = ((cmd.client_id << 8) | cmd.task_id);
+
+ return get_adsp_resource(client_idx, &cmd, sizeof(cmd));
+}
+
+/* must be called with audio->lock held */
+static int audio_enable(struct audio *audio)
+{
+ struct audmgr_config cfg;
+ int rc;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ if (audio->enabled)
+ return 0;
+
+ if (audio->rmt_resource_released == 1) {
+ audio->rmt_resource_released = 0;
+ rc = rmt_get_resource(audio);
+ if (rc)
+ MM_ERR("ADSP resources are not available");
+ }
+
+ audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
+ audio->out_tail = 0;
+ audio->out_needed = 0;
+
+ cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
+ cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000;
+ cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK;
+ cfg.codec = RPC_AUD_DEF_CODEC_PCM;
+ cfg.snd_method = RPC_SND_METHOD_MIDI;
+
+ rc = audmgr_enable(&audio->audmgr, &cfg);
+ if (rc < 0)
+ return rc;
+
+ if (msm_adsp_enable(audio->audplay)) {
+ MM_ERR("msm_adsp_enable(audplay) failed\n");
+ audmgr_disable(&audio->audmgr);
+ return -ENODEV;
+ }
+
+ if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) {
+ MM_ERR("audpp_enable() failed\n");
+ msm_adsp_disable(audio->audplay);
+ audmgr_disable(&audio->audmgr);
+ return -ENODEV;
+ }
+
+ audio->enabled = 1;
+ return 0;
+}
+
+/* must be called with audio->lock held */
+static int audio_disable(struct audio *audio)
+{
+ int rc = 0;
+ MM_DBG("\n"); /* Macro prints the file name and function */
+ if (audio->enabled) {
+ audio->enabled = 0;
+ audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
+ auddec_dsp_config(audio, 0);
+ rc = wait_event_interruptible_timeout(audio->wait,
+ audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
+ msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE)
+ rc = -EFAULT;
+ else
+ rc = 0;
+ audio->stopped = 1;
+ wake_up(&audio->write_wait);
+ msm_adsp_disable(audio->audplay);
+ audpp_disable(audio->dec_id, audio);
+ audmgr_disable(&audio->audmgr);
+ audio->out_needed = 0;
+ rmt_put_resource(audio);
+ audio->rmt_resource_released = 1;
+ }
+ return rc;
+}
+
+/* ------------------- dsp --------------------- */
+static void audplay_dsp_event(void *data, unsigned id, size_t len,
+ void (*getevent) (void *ptr, size_t len))
+{
+ struct audio *audio = data;
+ uint32_t msg[28];
+ getevent(msg, sizeof(msg));
+
+ MM_DBG("msg_id=%x\n", id);
+
+ switch (id) {
+ case AUDPLAY_MSG_DEC_NEEDS_DATA:
+ audpcm_async_send_data(audio, 1);
+ break;
+ case ADSP_MESSAGE_ID:
+ MM_DBG("Received ADSP event: module enable(audplaytask)\n");
+ break;
+ default:
+ MM_ERR("unexpected message from decoder\n");
+ break;
+ }
+}
+
+static void audio_dsp_event(void *private, unsigned id, uint16_t *msg)
+{
+ struct audio *audio = private;
+
+ switch (id) {
+ case AUDPP_MSG_STATUS_MSG:{
+ unsigned status = msg[1];
+
+ switch (status) {
+ case AUDPP_DEC_STATUS_SLEEP: {
+ uint16_t reason = msg[2];
+ MM_DBG("decoder status: sleep reason =0x%04x\n",
+ reason);
+ if ((reason == AUDPP_MSG_REASON_MEM)
+ || (reason ==
+ AUDPP_MSG_REASON_NODECODER)) {
+ audio->dec_state =
+ MSM_AUD_DECODER_STATE_FAILURE;
+ wake_up(&audio->wait);
+ } else if (reason == AUDPP_MSG_REASON_NONE) {
+ /* decoder is in disable state */
+ audio->dec_state =
+ MSM_AUD_DECODER_STATE_CLOSE;
+ wake_up(&audio->wait);
+ }
+ break;
+ }
+ case AUDPP_DEC_STATUS_INIT:
+ MM_DBG("decoder status: init\n");
+ audpp_cmd_cfg_adec_params(audio);
+ break;
+
+ case AUDPP_DEC_STATUS_CFG:
+ MM_DBG("decoder status: cfg\n");
+ break;
+ case AUDPP_DEC_STATUS_PLAY:
+ MM_DBG("decoder status: play\n");
+ audio->dec_state =
+ MSM_AUD_DECODER_STATE_SUCCESS;
+ wake_up(&audio->wait);
+ break;
+ default:
+ MM_ERR("unknown decoder status\n");
+ break;
+ }
+ break;
+ }
+ case AUDPP_MSG_CFG_MSG:
+ if (msg[0] == AUDPP_MSG_ENA_ENA) {
+ MM_DBG("CFG_MSG ENABLE\n");
+ auddec_dsp_config(audio, 1);
+ audio->out_needed = 0;
+ audio->running = 1;
+ audpp_set_volume_and_pan(audio->dec_id, audio->volume,
+ 0);
+ } else if (msg[0] == AUDPP_MSG_ENA_DIS) {
+ MM_DBG("CFG_MSG DISABLE\n");
+ audio->running = 0;
+ } else {
+ MM_ERR("CFG_MSG %d?\n", msg[0]);
+ }
+ break;
+ case AUDPP_MSG_FLUSH_ACK:
+ MM_DBG("FLUSH_ACK\n");
+ audio->wflush = 0;
+ wake_up(&audio->write_wait);
+ break;
+
+ case AUDPP_MSG_PCMDMAMISSED:
+ MM_DBG("PCMDMAMISSED\n");
+ audio->teos = 1;
+ wake_up(&audio->write_wait);
+ break;
+
+ default:
+ MM_ERR("UNKNOWN (%d)\n", id);
+ }
+
+}
+
+
+struct msm_adsp_ops audlpadec_adsp_ops = {
+ .event = audplay_dsp_event,
+};
+
+
+#define audplay_send_queue0(audio, cmd, len) \
+ msm_adsp_write(audio->audplay, audio->queue_id, \
+ cmd, len)
+
+static int auddec_dsp_config(struct audio *audio, int enable)
+{
+ u16 cfg_dec_cmd[AUDPP_CMD_CFG_DEC_TYPE_LEN / sizeof(unsigned short)];
+
+ memset(cfg_dec_cmd, 0, sizeof(cfg_dec_cmd));
+
+ cfg_dec_cmd[0] = AUDPP_CMD_CFG_DEC_TYPE;
+ if (enable)
+ cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC |
+ AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_PCM;
+ else
+ cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC |
+ AUDPP_CMD_DIS_DEC_V;
+
+ return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd));
+}
+
+/*
+ * Push this instance's current PCM parameters (sample rate, channel
+ * mode, bit width) to the DSP as an ADEC-params command.  Called from
+ * the DSP event handler once the decoder reports STATUS_INIT.
+ */
+static void audpp_cmd_cfg_adec_params(struct audio *audio)
+{
+	audpp_cmd_cfg_adec_params_wav cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
+	cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_WAV_LEN;
+	cmd.common.dec_id = audio->dec_id;
+	cmd.common.input_sampling_frequency = audio->out_sample_rate;
+	cmd.stereo_cfg = audio->out_channel_mode;
+	cmd.pcm_width = audio->out_bits;
+	cmd.sign = 0;
+	audpp_send_queue2(&cmd, sizeof(cmd));
+}
+/*
+ * Feed the DSP one buffer from out_queue (async/AIO write path).
+ *
+ * @needed: non-zero when called from the DSP's SEND_BUFFER request,
+ *          meaning the buffer previously handed to the DSP has been
+ *          consumed; zero when called to kick-start after a new write.
+ *
+ * At most one buffer is outstanding at a time, tracked by
+ * ADRV_STATUS_OBUF_GIVEN.  A consumed buffer is completed to userspace
+ * as an AUDIO_EVENT_WRITE_DONE before the next one is submitted.
+ * Runs under dsp_lock since it is reachable from both interrupt
+ * (DSP event) and process context.
+ */
+static void audpcm_async_send_data(struct audio *audio, unsigned needed)
+{
+	unsigned long flags;
+
+	if (!audio->running)
+		return;
+
+	spin_lock_irqsave(&audio->dsp_lock, flags);
+
+	if (needed && !audio->wflush) {
+		audio->out_needed = 1;
+		if (audio->drv_status & ADRV_STATUS_OBUF_GIVEN) {
+			/* pop one node out of queue */
+			union msm_audio_event_payload payload;
+			struct audpcm_buffer_node *used_buf;
+
+			MM_DBG("consumed\n");
+
+			BUG_ON(list_empty(&audio->out_queue));
+			used_buf = list_first_entry(&audio->out_queue,
+				struct audpcm_buffer_node, list);
+			list_del(&used_buf->list);
+			payload.aio_buf = used_buf->buf;
+			audpcm_post_event(audio, AUDIO_EVENT_WRITE_DONE,
+				payload);
+			kfree(used_buf);
+			audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
+		}
+	}
+	if (audio->out_needed) {
+		struct audpcm_buffer_node *next_buf;
+		audplay_cmd_bitstream_data_avail cmd;
+		if (!list_empty(&audio->out_queue)) {
+			next_buf = list_first_entry(&audio->out_queue,
+					struct audpcm_buffer_node, list);
+			MM_DBG("next_buf %p\n", next_buf);
+			if (next_buf) {
+				MM_DBG("next buf phy %lx len %d\n",
+				next_buf->paddr, next_buf->buf.data_len);
+
+				cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL;
+				if (next_buf->buf.data_len)
+					cmd.decoder_id = audio->dec_id;
+				else {
+					/* zero-length buffer marks end of stream */
+					cmd.decoder_id = -1;
+					MM_DBG("input EOS signaled\n");
+				}
+				cmd.buf_ptr = (unsigned) next_buf->paddr;
+				/* buf_size presumably counts 16-bit words
+				 * (data_len is bytes) -- TODO confirm against
+				 * AUDPLAY command spec */
+				cmd.buf_size = next_buf->buf.data_len >> 1;
+				cmd.partition_number = 0;
+				/* complete writes to the input buffer */
+				wmb();
+				audplay_send_queue0(audio, &cmd, sizeof(cmd));
+				audio->out_needed = 0;
+				audio->drv_status |= ADRV_STATUS_OBUF_GIVEN;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&audio->dsp_lock, flags);
+}
+
+/* ------------------- device --------------------- */
+/* ------------------- device --------------------- */
+/*
+ * Drop every pending AIO write buffer: each queued node is completed
+ * back to userspace as AUDIO_EVENT_WRITE_DONE (with its original, i.e.
+ * unconsumed, data_len) and freed.  Also clears the "buffer given to
+ * DSP" state so the send path starts fresh.
+ */
+static void audpcm_async_flush(struct audio *audio)
+{
+	struct audpcm_buffer_node *buf_node;
+	struct list_head *ptr, *next;
+	union msm_audio_event_payload payload;
+
+	MM_DBG("\n"); /* Macro prints the file name and function */
+	list_for_each_safe(ptr, next, &audio->out_queue) {
+		buf_node = list_entry(ptr, struct audpcm_buffer_node, list);
+		list_del(&buf_node->list);
+		payload.aio_buf = buf_node->buf;
+		audpcm_post_event(audio, AUDIO_EVENT_WRITE_DONE,
+				payload);
+		kfree(buf_node);
+	}
+	audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
+	audio->out_needed = 0;
+	atomic_set(&audio->out_bytes, 0);
+}
+/*
+ * Reset the write path for AUDIO_STOP/AUDIO_FLUSH: wake any sleeper in
+ * fsync/write, then flush queued buffers under write_lock so the woken
+ * thread observes the flush before continuing.
+ */
+static void audio_ioport_reset(struct audio *audio)
+{
+	if (audio->drv_status & ADRV_STATUS_AIO_INTF) {
+		/* If fsync is in progress, make sure
+		 * return value of fsync indicates
+		 * abort due to flush
+		 */
+		if (audio->drv_status & ADRV_STATUS_FSYNC) {
+			MM_DBG("fsync in progress\n");
+			wake_up(&audio->write_wait);
+			mutex_lock(&audio->write_lock);
+			audpcm_async_flush(audio);
+			mutex_unlock(&audio->write_lock);
+		} else
+			audpcm_async_flush(audio);
+	} else {
+		/* Make sure read/write thread are free from
+		 * sleep and knowing that system is not able
+		 * to process io request at the moment
+		 */
+		wake_up(&audio->write_wait);
+		mutex_lock(&audio->write_lock);
+		audpcm_async_flush(audio);
+		mutex_unlock(&audio->write_lock);
+	}
+}
+
+/*
+ * Wait-condition helper: true when an event is queued or an abort was
+ * requested.  NOTE: despite its name, 'empty' holds "queue is NOT
+ * empty" (!list_empty).
+ */
+static int audpcm_events_pending(struct audio *audio)
+{
+	unsigned long flags;
+	int empty;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+	empty = !list_empty(&audio->event_queue);
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+	return empty || audio->event_abort;
+}
+
+/*
+ * Free every node on both the pending and free event lists; called on
+ * release.  Deleting the first entry each iteration is safe because
+ * list_for_each_safe caches the next pointer.
+ */
+static void audpcm_reset_event_queue(struct audio *audio)
+{
+	unsigned long flags;
+	struct audpcm_event *drv_evt;
+	struct list_head *ptr, *next;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+	list_for_each_safe(ptr, next, &audio->event_queue) {
+		drv_evt = list_first_entry(&audio->event_queue,
+				struct audpcm_event, list);
+		list_del(&drv_evt->list);
+		kfree(drv_evt);
+	}
+	list_for_each_safe(ptr, next, &audio->free_event_queue) {
+		drv_evt = list_first_entry(&audio->free_event_queue,
+				struct audpcm_event, list);
+		list_del(&drv_evt->list);
+		kfree(drv_evt);
+	}
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+
+	return;
+}
+
+/*
+ * AUDIO_GET_EVENT backend: block (optionally with a timeout taken from
+ * the user's msm_audio_event.timeout_ms) until an event is queued or an
+ * abort is requested, then copy the oldest event to userspace and
+ * recycle its node onto free_event_queue.
+ *
+ * Returns 0 on success, -ETIMEDOUT, -ENODEV on abort, -EFAULT on copy
+ * failures, or -1 if woken with an empty queue (NOTE(review): -1 is
+ * -EPERM to userspace; a real errno would be clearer).
+ *
+ * For WRITE_DONE events the pmem region's ref count is dropped here,
+ * pairing the ref taken in audlpa_aio_buf_add().
+ */
+static long audpcm_process_event_req(struct audio *audio, void __user *arg)
+{
+	long rc;
+	struct msm_audio_event usr_evt;
+	struct audpcm_event *drv_evt = NULL;
+	int timeout;
+	unsigned long flags;
+
+	if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
+		return -EFAULT;
+
+	timeout = (int) usr_evt.timeout_ms;
+
+	if (timeout > 0) {
+		rc = wait_event_interruptible_timeout(
+			audio->event_wait, audpcm_events_pending(audio),
+			msecs_to_jiffies(timeout));
+		if (rc == 0)
+			return -ETIMEDOUT;
+	} else {
+		rc = wait_event_interruptible(
+			audio->event_wait, audpcm_events_pending(audio));
+	}
+
+	if (rc < 0)
+		return rc;
+
+	if (audio->event_abort) {
+		audio->event_abort = 0;
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+	if (!list_empty(&audio->event_queue)) {
+		drv_evt = list_first_entry(&audio->event_queue,
+				struct audpcm_event, list);
+		list_del(&drv_evt->list);
+	}
+	if (drv_evt) {
+		usr_evt.event_type = drv_evt->event_type;
+		usr_evt.event_payload = drv_evt->payload;
+		list_add_tail(&drv_evt->list, &audio->free_event_queue);
+	} else
+		rc = -1;
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+
+	if (drv_evt && drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) {
+		mutex_lock(&audio->lock);
+		audlpa_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
+				  drv_evt->payload.aio_buf.buf_len, 0);
+		mutex_unlock(&audio->lock);
+	}
+	if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
+		rc = -EFAULT;
+
+	return rc;
+}
+
+/*
+ * Reject a pmem registration whose user virtual range [vaddr, vaddr+len)
+ * contains, is contained by, or overlaps any already-registered region.
+ * Returns 0 when the range is disjoint from all regions, else -EINVAL.
+ */
+static int audlpa_pmem_check(struct audio *audio,
+		void *vaddr, unsigned long len)
+{
+	struct audlpa_pmem_region *region_elt;
+	struct audlpa_pmem_region t = { .vaddr = vaddr, .len = len };
+
+	list_for_each_entry(region_elt, &audio->pmem_region_queue, list) {
+		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
+		    OVERLAPS(region_elt, &t)) {
+			MM_ERR("region (vaddr %p len %ld)"
+				" clashes with registered region"
+				" (vaddr %p paddr %p len %ld)\n",
+				vaddr, len,
+				region_elt->vaddr,
+				(void *)region_elt->paddr,
+				region_elt->len);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * AUDIO_REGISTER_PMEM backend: pin the client's pmem fd via
+ * get_pmem_file() (holds a file reference until removal) and record the
+ * user-vaddr -> paddr mapping on pmem_region_queue so AUDIO_ASYNC_WRITE
+ * buffers can be translated to physical addresses.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
+ * fd is not a pmem file or the range clashes with an existing region.
+ */
+static int audlpa_pmem_add(struct audio *audio,
+	struct msm_audio_pmem_info *info)
+{
+	unsigned long paddr, kvaddr, len;
+	struct file *file;
+	struct audlpa_pmem_region *region;
+	int rc = -EINVAL;
+
+	MM_DBG("\n"); /* Macro prints the file name and function */
+	region = kmalloc(sizeof(*region), GFP_KERNEL);
+
+	if (!region) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
+		kfree(region);
+		goto end;
+	}
+
+	rc = audlpa_pmem_check(audio, info->vaddr, len);
+	if (rc < 0) {
+		put_pmem_file(file);
+		kfree(region);
+		goto end;
+	}
+
+	region->vaddr = info->vaddr;
+	region->fd = info->fd;
+	region->paddr = paddr;
+	region->kvaddr = kvaddr;
+	region->len = len;
+	region->file = file;
+	region->ref_cnt = 0;
+	MM_DBG("add region paddr %lx vaddr %p, len %lu\n", region->paddr,
+			region->vaddr, region->len);
+	/* fixed mojibake: "&region" had been corrupted to "(R)ion" */
+	list_add_tail(&region->list, &audio->pmem_region_queue);
+end:
+	return rc;
+}
+
+/*
+ * AUDIO_DEREGISTER_PMEM backend: find the region matching both fd and
+ * vaddr, and remove it (unpinning the pmem file) only when no buffer is
+ * still referencing it.  Returns 0 on removal, -EINVAL when no match is
+ * found or the region is still in use.
+ */
+static int audlpa_pmem_remove(struct audio *audio,
+	struct msm_audio_pmem_info *info)
+{
+	struct audlpa_pmem_region *region;
+	struct list_head *ptr, *next;
+	int rc = -EINVAL;
+
+	MM_DBG("info fd %d vaddr %p\n", info->fd, info->vaddr);
+
+	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
+		region = list_entry(ptr, struct audlpa_pmem_region, list);
+
+		if ((region->fd == info->fd) &&
+		    (region->vaddr == info->vaddr)) {
+			if (region->ref_cnt) {
+				MM_DBG("region %p in use ref_cnt %d\n",
+						region, region->ref_cnt);
+				break;
+			}
+			MM_DBG("remove region fd %d vaddr %p\n",
+				info->fd, info->vaddr);
+			/* fixed mojibake: "&region" had been corrupted */
+			list_del(&region->list);
+			put_pmem_file(region->file);
+			kfree(region);
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Find the registered pmem region fully containing [addr, addr+len).
+ * On success *region is set to the first match and 0 is returned; -1 if
+ * no region contains the range.  Multiple hits are logged as an error
+ * (should be impossible given audlpa_pmem_check()) but the first match
+ * is still used.
+ */
+static int audlpa_pmem_lookup_vaddr(struct audio *audio, void *addr,
+		     unsigned long len, struct audlpa_pmem_region **region)
+{
+	struct audlpa_pmem_region *region_elt;
+
+	int match_count = 0;
+
+	*region = NULL;
+
+	/* returns physical address or zero */
+	list_for_each_entry(region_elt, &audio->pmem_region_queue,
+		list) {
+		if (addr >= region_elt->vaddr &&
+		    addr < region_elt->vaddr + region_elt->len &&
+		    addr + len <= region_elt->vaddr + region_elt->len) {
+			/* offset since we could pass vaddr inside a registerd
+			 * pmem buffer
+			 */
+
+			match_count++;
+			if (!*region)
+				*region = region_elt;
+		}
+	}
+
+	if (match_count > 1) {
+		MM_ERR("multiple hits for vaddr %p, len %ld\n", addr, len);
+		list_for_each_entry(region_elt,
+		  &audio->pmem_region_queue, list) {
+			if (addr >= region_elt->vaddr &&
+			    addr < region_elt->vaddr + region_elt->len &&
+			    addr + len <= region_elt->vaddr + region_elt->len)
+				MM_ERR("\t%p, %ld --> %p\n", region_elt->vaddr,
+						region_elt->len,
+						(void *)region_elt->paddr);
+		}
+	}
+
+	return *region ? 0 : -1;
+}
+
+/*
+ * Translate a user virtual address inside a registered pmem region to
+ * its physical address, adjusting the region's reference count
+ * (ref_up != 0 takes a reference, 0 drops one).  Returns the physical
+ * address, or 0 when the address is not inside any registered region.
+ */
+unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr,
+		    unsigned long len, int ref_up)
+{
+	struct audlpa_pmem_region *region;
+	unsigned long paddr;
+	int ret;
+
+	/* fixed mojibake: "&region" had been corrupted to "(R)ion" */
+	ret = audlpa_pmem_lookup_vaddr(audio, addr, len, &region);
+	if (ret) {
+		MM_ERR("lookup (%p, %ld) failed\n", addr, len);
+		return 0;
+	}
+	if (ref_up)
+		region->ref_cnt++;
+	else
+		region->ref_cnt--;
+	MM_DBG("found region %p ref_cnt %d\n", region, region->ref_cnt);
+	paddr = region->paddr + (addr - region->vaddr);
+	return paddr;
+}
+
+/* audio -> lock must be held at this point */
+static int audlpa_aio_buf_add(struct audio *audio, unsigned dir,
+ void __user *arg)
+{
+ unsigned long flags;
+ struct audpcm_buffer_node *buf_node;
+
+ buf_node = kmalloc(sizeof(*buf_node), GFP_KERNEL);
+
+ if (!buf_node)
+ return -ENOMEM;
+
+ if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) {
+ kfree(buf_node);
+ return -EFAULT;
+ }
+
+ MM_DBG("node %p dir %x buf_addr %p buf_len %d data_len"
+ "%d\n", buf_node, dir,
+ buf_node->buf.buf_addr, buf_node->buf.buf_len,
+ buf_node->buf.data_len);
+
+ buf_node->paddr = audlpa_pmem_fixup(
+ audio, buf_node->buf.buf_addr,
+ buf_node->buf.buf_len, 1);
+
+ if (dir) {
+ /* write */
+ if (!buf_node->paddr ||
+ (buf_node->paddr & 0x1) ||
+ (buf_node->buf.data_len & 0x1)) {
+ kfree(buf_node);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&audio->dsp_lock, flags);
+ list_add_tail(&buf_node->list, &audio->out_queue);
+ spin_unlock_irqrestore(&audio->dsp_lock, flags);
+ audpcm_async_send_data(audio, 0);
+ }
+ MM_DBG("Add buf_node %p paddr %lx\n", buf_node, buf_node->paddr);
+
+ return 0;
+}
+
+/*
+ * Main ioctl dispatcher.  GET_STATS, SET_VOLUME, GET_EVENT and
+ * ABORT_GET_EVENT are handled before taking audio->lock (they use their
+ * own locking); everything else runs under audio->lock.
+ */
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct audio *audio = file->private_data;
+	int rc = 0;
+
+	MM_DBG("cmd = %d\n", cmd);
+
+	if (cmd == AUDIO_GET_STATS) {
+		struct msm_audio_stats stats;
+		/* NOTE(review): stats is not zeroed before copy_to_user;
+		 * any field/padding not assigned below leaks kernel stack
+		 * bytes -- consider memset(&stats, 0, sizeof(stats)). */
+		stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
+		stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
+		if (copy_to_user((void *) arg, &stats, sizeof(stats)))
+			return -EFAULT;
+		return 0;
+	}
+	if (cmd == AUDIO_SET_VOLUME) {
+		unsigned long flags;
+		spin_lock_irqsave(&audio->dsp_lock, flags);
+
+		/* scale userspace volume into DSP units, clamped to max */
+		audio->volume = MSM_VOLUME_STEP * arg;
+		audio->volume /= MSM_VOLUME_FACTOR;
+
+		if (audio->volume > MSM_MAX_VOLUME)
+			audio->volume = MSM_MAX_VOLUME;
+
+		if (audio->running)
+			audpp_set_volume_and_pan(audio->dec_id,
+					audio->volume, 0);
+		spin_unlock_irqrestore(&audio->dsp_lock, flags);
+		return 0;
+	}
+	if (cmd == AUDIO_GET_EVENT) {
+		MM_DBG("AUDIO_GET_EVENT\n");
+		/* trylock: only one reader may block in GET_EVENT at a time */
+		if (mutex_trylock(&audio->get_event_lock)) {
+			rc = audpcm_process_event_req(audio,
+				(void __user *) arg);
+			mutex_unlock(&audio->get_event_lock);
+		} else
+			rc = -EBUSY;
+		return rc;
+	}
+
+	if (cmd == AUDIO_ABORT_GET_EVENT) {
+		audio->event_abort = 1;
+		wake_up(&audio->event_wait);
+		return 0;
+	}
+
+	mutex_lock(&audio->lock);
+	switch (cmd) {
+	case AUDIO_START:
+		MM_DBG("AUDIO_START\n");
+		rc = audio_enable(audio);
+		if (!rc) {
+			/* wait for the DSP to report decoder init result */
+			rc = wait_event_interruptible_timeout(audio->wait,
+				audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
+				msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
+			MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
+
+			if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS)
+				rc = -ENODEV;
+			else
+				rc = 0;
+		}
+		break;
+	case AUDIO_STOP:
+		MM_DBG("AUDIO_STOP\n");
+		rc = audio_disable(audio);
+		audio_ioport_reset(audio);
+		/* NOTE(review): sibling msm audio drivers set stopped = 1
+		 * before audio_ioport_reset() and clear it afterwards; here
+		 * it is only ever cleared -- confirm intended. */
+		audio->stopped = 0;
+		break;
+	case AUDIO_FLUSH:
+		MM_DBG("AUDIO_FLUSH\n");
+		audio->wflush = 1;
+		audio_ioport_reset(audio);
+		if (audio->running) {
+			audpp_flush(audio->dec_id);
+			/* wflush is cleared by the DSP's FLUSH_ACK */
+			rc = wait_event_interruptible(audio->write_wait,
+				!audio->wflush);
+			if (rc < 0) {
+				MM_ERR("AUDIO_FLUSH interrupted\n");
+				rc = -EINTR;
+			}
+		} else {
+			audio->wflush = 0;
+		}
+		break;
+
+	case AUDIO_SET_CONFIG: {
+		struct msm_audio_config config;
+		if (copy_from_user(&config, (void *) arg, sizeof(config))) {
+			rc = -EFAULT;
+			break;
+		}
+		/* translate user-facing values into AUDPP register values */
+		if (config.channel_count == 1) {
+			config.channel_count = AUDPP_CMD_PCM_INTF_MONO_V;
+		} else if (config.channel_count == 2) {
+			config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V;
+		} else {
+			rc = -EINVAL;
+			break;
+		}
+		if (config.bits == 8)
+			config.bits = AUDPP_CMD_WAV_PCM_WIDTH_8;
+		else if (config.bits == 16)
+			config.bits = AUDPP_CMD_WAV_PCM_WIDTH_16;
+		else {
+			rc = -EINVAL;
+			break;
+		}
+		audio->out_sample_rate = config.sample_rate;
+		audio->out_channel_mode = config.channel_count;
+		audio->out_bits = config.bits;
+		audio->buffer_count = config.buffer_count;
+		audio->buffer_size = config.buffer_size;
+		MM_DBG("AUDIO_SET_CONFIG\n");
+		break;
+	}
+	case AUDIO_GET_CONFIG: {
+		struct msm_audio_config config;
+		config.buffer_count = audio->buffer_count;
+		config.buffer_size = audio->buffer_size;
+		config.sample_rate = audio->out_sample_rate;
+		if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V)
+			config.channel_count = 1;
+		else
+			config.channel_count = 2;
+		if (audio->out_bits == AUDPP_CMD_WAV_PCM_WIDTH_8)
+			config.bits = 8;
+		else if (audio->out_bits == AUDPP_CMD_WAV_PCM_WIDTH_16)
+			config.bits = 16;
+		else
+			config.bits = 16;
+		config.unused[0] = 0;
+		config.unused[1] = 0;
+
+		if (copy_to_user((void *) arg, &config, sizeof(config)))
+			rc = -EFAULT;
+		else
+			rc = 0;
+		MM_DBG("AUDIO_GET_CONFIG\n");
+		break;
+	}
+
+
+	case AUDIO_PAUSE:
+		MM_DBG("AUDIO_PAUSE %ld\n", arg);
+		rc = audpp_pause(audio->dec_id, (int) arg);
+		break;
+
+	case AUDIO_REGISTER_PMEM: {
+		struct msm_audio_pmem_info info;
+		MM_DBG("AUDIO_REGISTER_PMEM\n");
+		if (copy_from_user(&info, (void *) arg, sizeof(info)))
+			rc = -EFAULT;
+		else
+			rc = audlpa_pmem_add(audio, &info);
+		break;
+	}
+
+	case AUDIO_DEREGISTER_PMEM: {
+		struct msm_audio_pmem_info info;
+		MM_DBG("AUDIO_DEREGISTER_PMEM\n");
+		if (copy_from_user(&info, (void *) arg, sizeof(info)))
+			rc = -EFAULT;
+		else
+			rc = audlpa_pmem_remove(audio, &info);
+		break;
+	}
+
+	case AUDIO_ASYNC_WRITE:
+		/* disallow new writes while an fsync drain is in flight */
+		if (audio->drv_status & ADRV_STATUS_FSYNC)
+			rc = -EBUSY;
+		else
+			rc = audlpa_aio_buf_add(audio, 1, (void __user *) arg);
+		break;
+
+	case AUDIO_ASYNC_READ:
+		MM_ERR("AUDIO_ASYNC_READ not supported\n");
+		rc = -EPERM;
+		break;
+
+	default:
+		rc = -EINVAL;
+	}
+	mutex_unlock(&audio->lock);
+	return rc;
+}
+
+/* Only useful in tunnel-mode */
+int audlpa_async_fsync(struct audio *audio)
+{
+ int rc = 0;
+
+ MM_DBG("\n"); /* Macro prints the file name and function */
+
+ /* Blocking client sends more data */
+ mutex_lock(&audio->lock);
+ audio->drv_status |= ADRV_STATUS_FSYNC;
+ mutex_unlock(&audio->lock);
+
+ mutex_lock(&audio->write_lock);
+ /* pcm dmamiss message is sent continously
+ * when decoder is starved so no race
+ * condition concern
+ */
+ audio->teos = 0;
+
+ rc = wait_event_interruptible(audio->write_wait,
+ (audio->teos && audio->out_needed &&
+ list_empty(&audio->out_queue))
+ || audio->wflush || audio->stopped);
+
+ if (audio->stopped || audio->wflush)
+ rc = -EBUSY;
+
+ mutex_unlock(&audio->write_lock);
+ mutex_lock(&audio->lock);
+ audio->drv_status &= ~ADRV_STATUS_FSYNC;
+ mutex_unlock(&audio->lock);
+
+ return rc;
+}
+
+/*
+ * Drain for the synchronous (double-buffered audio->out[]) write path.
+ * NOTE(review): audlpa_fsync() below always takes the async path, so
+ * this appears unused in this file -- presumably kept for the legacy
+ * sync interface; confirm before relying on it.  If a trailing odd byte
+ * is pending (audio->reserved) it is padded to two bytes and sent
+ * before waiting for end-of-stream (teos).
+ */
+int audlpa_sync_fsync(struct audio *audio)
+{
+	struct buffer *frame;
+	int rc = 0;
+
+	MM_DBG("\n"); /* Macro prints the file name and function */
+
+	mutex_lock(&audio->write_lock);
+
+	rc = wait_event_interruptible(audio->write_wait,
+		(!audio->out[0].used &&
+		!audio->out[1].used &&
+		audio->out_needed) || audio->wflush);
+
+	if (rc < 0)
+		goto done;
+	else if (audio->wflush) {
+		rc = -EBUSY;
+		goto done;
+	}
+
+	if (audio->reserved) {
+		MM_DBG("send reserved byte\n");
+		frame = audio->out + audio->out_tail;
+		((char *) frame->data)[0] = audio->rsv_byte;
+		((char *) frame->data)[1] = 0;
+		frame->used = 2;
+		audpcm_async_send_data(audio, 0);
+
+		rc = wait_event_interruptible(audio->write_wait,
+			(!audio->out[0].used &&
+			!audio->out[1].used &&
+			audio->out_needed) || audio->wflush);
+
+		if (rc < 0)
+			goto done;
+		else if (audio->wflush) {
+			rc = -EBUSY;
+			goto done;
+		}
+	}
+
+	/* pcm dmamiss message is sent continously
+	 * when decoder is starved so no race
+	 * condition concern
+	 */
+	audio->teos = 0;
+
+	rc = wait_event_interruptible(audio->write_wait,
+		audio->teos || audio->wflush);
+
+	if (audio->wflush)
+		rc = -EBUSY;
+
+done:
+	mutex_unlock(&audio->write_lock);
+	return rc;
+}
+
+/*
+ * fops->fsync entry point: only valid while playback is running; always
+ * uses the async (AIO) drain path.
+ */
+int audlpa_fsync(struct file *file, int datasync)
+{
+	struct audio *audio = file->private_data;
+
+	if (!audio->running)
+		return -EINVAL;
+
+	return audlpa_async_fsync(audio);
+}
+
+/*
+ * Release every registered pmem region on device close: unlink, drop
+ * the pmem file reference taken in audlpa_pmem_add(), and free the
+ * tracking node.  ref_cnt is intentionally ignored here -- the device
+ * is going away.
+ */
+static void audpcm_reset_pmem_region(struct audio *audio)
+{
+	struct audlpa_pmem_region *region;
+	struct list_head *ptr, *next;
+
+	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
+		region = list_entry(ptr, struct audlpa_pmem_region, list);
+		/* fixed mojibake: "&region" had been corrupted to "(R)ion" */
+		list_del(&region->list);
+		put_pmem_file(region->file);
+		kfree(region);
+	}
+
+	return;
+}
+
+/*
+ * fops->release: stop the decoder, return ADSP/remote resources, flush
+ * pending buffers and pmem regions, wake and drain any event reader,
+ * then free the per-open instance.
+ */
+static int audio_release(struct inode *inode, struct file *file)
+{
+	struct audio *audio = file->private_data;
+
+	MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
+	mutex_lock(&audio->lock);
+	audio_disable(audio);
+	if (audio->rmt_resource_released == 0)
+		rmt_put_resource(audio);
+	audpcm_async_flush(audio);
+	audpcm_reset_pmem_region(audio);
+
+	msm_adsp_put(audio->audplay);
+	audpp_adec_free(audio->dec_id);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	unregister_early_suspend(&audio->suspend_ctl.node);
+#endif
+	audio->opened = 0;
+	/* abort any blocked AUDIO_GET_EVENT reader before tearing down */
+	audio->event_abort = 1;
+	wake_up(&audio->event_wait);
+	audpcm_reset_event_queue(audio);
+	MM_DBG("pmem area = 0x%8x\n", (unsigned int)audio->data);
+	if (audio->data) {
+		msm_subsystem_unmap_buffer(audio->map_v_write);
+		free_contiguous_memory_by_paddr(audio->phys);
+	}
+	mutex_unlock(&audio->lock);
+#ifdef CONFIG_DEBUG_FS
+	if (audio->dentry)
+		debugfs_remove(audio->dentry);
+#endif
+	kfree(audio);
+	return 0;
+}
+
+/*
+ * Queue an event for AUDIO_GET_EVENT and wake the reader.  Reuses a
+ * node from free_event_queue when available, otherwise allocates with
+ * GFP_ATOMIC (callable from the DSP event path under dsp_lock).  On
+ * allocation failure the event is dropped with an error log.
+ */
+static void audpcm_post_event(struct audio *audio, int type,
+		union msm_audio_event_payload payload)
+{
+	struct audpcm_event *e_node = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->event_queue_lock, flags);
+
+	if (!list_empty(&audio->free_event_queue)) {
+		e_node = list_first_entry(&audio->free_event_queue,
+				struct audpcm_event, list);
+		list_del(&e_node->list);
+	} else {
+		e_node = kmalloc(sizeof(struct audpcm_event), GFP_ATOMIC);
+		if (!e_node) {
+			MM_ERR("No mem to post event %d\n", type);
+			spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+			return;
+		}
+	}
+
+	e_node->event_type = type;
+	e_node->payload = payload;
+
+	list_add_tail(&e_node->list, &audio->event_queue);
+	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
+	wake_up(&audio->event_wait);
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * Early-suspend hooks: forward suspend/resume notifications to
+ * userspace through the event queue.
+ * NOTE(review): 'payload' is passed uninitialized in both hooks and is
+ * later copied to userspace by audpcm_process_event_req() -- stack
+ * bytes leak unless userspace ignores the payload for these event
+ * types; consider zero-initializing.
+ */
+static void audpcm_suspend(struct early_suspend *h)
+{
+	struct audpcm_suspend_ctl *ctl =
+		container_of(h, struct audpcm_suspend_ctl, node);
+	union msm_audio_event_payload payload;
+
+	MM_DBG("\n"); /* Macro prints the file name and function */
+	audpcm_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
+}
+
+static void audpcm_resume(struct early_suspend *h)
+{
+	struct audpcm_suspend_ctl *ctl =
+		container_of(h, struct audpcm_suspend_ctl, node);
+	union msm_audio_event_payload payload;
+
+	MM_DBG("\n"); /* Macro prints the file name and function */
+	audpcm_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs open: stash the audio instance for the read handler.
+ * NOTE(review): declared ssize_t but fops .open expects int -- works on
+ * this ABI, but the signature should be int. */
+static ssize_t audpcm_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/*
+ * Dump driver state as text.  The static buffer is shared across all
+ * instances/readers (debug-only; concurrent reads can interleave).
+ */
+static ssize_t audpcm_debug_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *ppos)
+{
+	const int debug_bufmax = 4096;
+	static char buffer[4096];
+	int n = 0;
+	struct audio *audio = file->private_data;
+
+	mutex_lock(&audio->lock);
+	n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"enabled %d\n", audio->enabled);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"stopped %d\n", audio->stopped);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"out_buf_sz %d\n", audio->out[0].size);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"volume %lx\n", audio->volume);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"sample rate %d\n", audio->out_sample_rate);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"channel mode %d\n", audio->out_channel_mode);
+	mutex_unlock(&audio->lock);
+	/* Following variables are only useful for debugging when
+	 * when playback halts unexpectedly. Thus, no mutual exclusion
+	 * enforced
+	 */
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"wflush %d\n", audio->wflush);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"running %d\n", audio->running);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"dec state %d\n", audio->dec_state);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"out_needed %d\n", audio->out_needed);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"out_head %d\n", audio->out_head);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"out_tail %d\n", audio->out_tail);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"out[0].used %d\n", audio->out[0].used);
+	n += scnprintf(buffer + n, debug_bufmax - n,
+			"out[1].used %d\n", audio->out[1].used);
+	buffer[n] = 0;
+	return simple_read_from_buffer(buf, count, ppos, buffer, n);
+}
+
+static const struct file_operations audpcm_debug_fops = {
+	.read = audpcm_debug_read,
+	.open = audpcm_debug_open,
+};
+#endif
+
+/*
+ * fops->open: allocate the per-open instance, claim a tunnel-mode PCM
+ * decoder, acquire the audmgr/adsp/remote resources, initialize locks,
+ * queues and defaults (44.1 kHz stereo 16-bit), and pre-allocate event
+ * nodes.  Non-tunneled (read) mode is rejected with -EPERM.
+ */
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct audio *audio = NULL;
+	int rc, i, dec_attrb, decid;
+	struct audpcm_event *e_node = NULL;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes represents decoder number, 1 byte for terminate string */
+	/* NOTE(review): sized from "msm_lpa_" (14 bytes) but snprintf below
+	 * formats "msm_pcm_lp_dec_%04x" (20 bytes incl. NUL) -- the debugfs
+	 * name is silently truncated; enlarge the buffer. */
+	char name[sizeof "msm_lpa_" + 5];
+#endif
+
+	/* Allocate audio instance, set to zero */
+	audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
+	if (!audio) {
+		MM_ERR("no memory to allocate audio instance\n");
+		rc = -ENOMEM;
+		goto done;
+	}
+	MM_DBG("audio instance 0x%08x created\n", (int)audio);
+
+	/* Allocate the decoder */
+	dec_attrb = AUDDEC_DEC_PCM;
+	if (file->f_mode & FMODE_READ) {
+		MM_ERR("Non-Tunneled mode not supported\n");
+		rc = -EPERM;
+		kfree(audio);
+		goto done;
+	} else
+		dec_attrb |= MSM_AUD_MODE_TUNNEL;
+
+	decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
+			&audio->queue_id);
+	if (decid < 0) {
+		MM_ERR("No free decoder available\n");
+		rc = -ENODEV;
+		MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
+		kfree(audio);
+		goto done;
+	}
+	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
+
+	audio->buffer_size = BUFSZ;
+	audio->buffer_count = MAX_BUF;
+	rc = audmgr_open(&audio->audmgr);
+	if (rc)
+		goto err;
+
+	rc = msm_adsp_get(audio->module_name, &audio->audplay,
+		&audlpadec_adsp_ops, audio);
+	if (rc) {
+		MM_ERR("failed to get %s module\n", audio->module_name);
+		audmgr_close(&audio->audmgr);
+		goto err;
+	}
+
+	rc = rmt_get_resource(audio);
+	if (rc) {
+		MM_ERR("ADSP resources are not available for PCM session");
+		audmgr_close(&audio->audmgr);
+		msm_adsp_put(audio->audplay);
+		goto err;
+	}
+
+	/* Initialize all locks of audio instance */
+	mutex_init(&audio->lock);
+	mutex_init(&audio->write_lock);
+	mutex_init(&audio->get_event_lock);
+	spin_lock_init(&audio->dsp_lock);
+	init_waitqueue_head(&audio->write_wait);
+	INIT_LIST_HEAD(&audio->out_queue);
+	INIT_LIST_HEAD(&audio->pmem_region_queue);
+	INIT_LIST_HEAD(&audio->free_event_queue);
+	INIT_LIST_HEAD(&audio->event_queue);
+	init_waitqueue_head(&audio->wait);
+	init_waitqueue_head(&audio->event_wait);
+	spin_lock_init(&audio->event_queue_lock);
+
+	/* default format until AUDIO_SET_CONFIG overrides it */
+	audio->out_sample_rate = 44100;
+	audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
+	audio->out_bits = AUDPP_CMD_WAV_PCM_WIDTH_16;
+	audio->volume = 0x2000;
+	audpcm_async_flush(audio);
+
+	file->private_data = audio;
+	audio->opened = 1;
+
+#ifdef CONFIG_DEBUG_FS
+	snprintf(name, sizeof name, "msm_pcm_lp_dec_%04x", audio->dec_id);
+	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+			NULL, (void *) audio, &audpcm_debug_fops);
+
+	if (IS_ERR(audio->dentry))
+		MM_DBG("debugfs_create_file failed\n");
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+	audio->suspend_ctl.node.resume = audpcm_resume;
+	audio->suspend_ctl.node.suspend = audpcm_suspend;
+	audio->suspend_ctl.audio = audio;
+	register_early_suspend(&audio->suspend_ctl.node);
+#endif
+	/* pre-allocate event nodes; best-effort, a partial pool is fine */
+	for (i = 0; i < AUDPCM_EVENT_NUM; i++) {
+		e_node = kmalloc(sizeof(struct audpcm_event), GFP_KERNEL);
+		if (e_node)
+			list_add_tail(&e_node->list, &audio->free_event_queue);
+		else {
+			MM_ERR("event pkt alloc failed\n");
+			break;
+		}
+	}
+done:
+	return rc;
+err:
+	audpp_adec_free(audio->dec_id);
+	MM_DBG("audio instance 0x%08x freeing\n", (int)audio);
+	kfree(audio);
+	return rc;
+}
+
+/* Character-device entry points for /dev/msm_pcm_lp_dec. */
+static const struct file_operations audio_pcm_lp_fops = {
+	.owner		= THIS_MODULE,
+	.open		= audio_open,
+	.release	= audio_release,
+	.unlocked_ioctl	= audio_ioctl,
+	.fsync = audlpa_fsync,
+};
+
+struct miscdevice audio_lpa_misc = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "msm_pcm_lp_dec",
+	.fops	= &audio_pcm_lp_fops,
+};
+
+/* Register the misc device at device_initcall time. */
+static int __init audio_init(void)
+{
+	return misc_register(&audio_lpa_misc);
+}
+
+device_initcall(audio_init);
diff --git a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
index 9b03985..e7a81d3 100644
--- a/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp6v2/audio_acdb.c
@@ -630,6 +630,7 @@
{
int result;
unsigned long paddr;
+ void *kvptr;
unsigned long kvaddr;
unsigned long mem_len;
@@ -638,6 +639,7 @@
msm_ion_client_create(UINT_MAX, "audio_acdb_client");
if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
pr_err("%s: Could not register ION client!!!\n", __func__);
+ result = PTR_ERR(acdb_data.ion_client);
goto err;
}
@@ -645,6 +647,7 @@
atomic_read(&acdb_data.map_handle));
if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
pr_err("%s: Could not import map handle!!!\n", __func__);
+ result = PTR_ERR(acdb_data.ion_handle);
goto err_ion_client;
}
@@ -655,12 +658,14 @@
goto err_ion_handle;
}
- kvaddr = (unsigned long)ion_map_kernel(acdb_data.ion_client,
+ kvptr = ion_map_kernel(acdb_data.ion_client,
acdb_data.ion_handle, 0);
- if (IS_ERR_OR_NULL(&kvaddr)) {
+ if (IS_ERR_OR_NULL(kvptr)) {
pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
+ result = PTR_ERR(kvptr);
goto err_ion_handle;
}
+ kvaddr = (unsigned long)kvptr;
mutex_unlock(&acdb_data.acdb_mutex);
atomic64_set(&acdb_data.paddr, paddr);
diff --git a/arch/arm/mach-msm/qdss-etb.c b/arch/arm/mach-msm/qdss-etb.c
index 39b7670..252352c 100644
--- a/arch/arm/mach-msm/qdss-etb.c
+++ b/arch/arm/mach-msm/qdss-etb.c
@@ -69,9 +69,11 @@
void __iomem *base;
bool enabled;
bool reading;
- struct mutex lock;
+ struct mutex mutex;
atomic_t in_use;
struct device *dev;
+ struct kobject *kobj;
+ uint32_t trigger_cntr;
};
static struct etb_ctx etb;
@@ -89,6 +91,7 @@
etb_writel(etb, 0x0, ETB_RAM_WRITE_POINTER);
etb_writel(etb, 0x0, ETB_RAM_READ_POINTER);
+ etb_writel(etb, etb.trigger_cntr, ETB_TRG);
etb_writel(etb, BIT(13) | BIT(0), ETB_FFCR);
etb_writel(etb, BIT(0), ETB_CTL_REG);
@@ -97,37 +100,48 @@
void etb_enable(void)
{
- mutex_lock(&etb.lock);
+ mutex_lock(&etb.mutex);
__etb_enable();
etb.enabled = true;
- dev_info(etb.dev, "etb enabled\n");
- mutex_unlock(&etb.lock);
+ dev_info(etb.dev, "ETB enabled\n");
+ mutex_unlock(&etb.mutex);
}
static void __etb_disable(void)
{
int count;
+ uint32_t ffcr;
ETB_UNLOCK();
- etb_writel(etb, BIT(12) | BIT(13), ETB_FFCR);
+ ffcr = etb_readl(etb, ETB_FFCR);
+ ffcr |= (BIT(12) | BIT(6));
+ etb_writel(etb, ffcr, ETB_FFCR);
+
+ for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFCR), 6) != 0
+ && count > 0; count--)
+ udelay(1);
+ WARN(count == 0, "timeout while flushing ETB, ETB_FFCR: %#x\n",
+ etb_readl(etb, ETB_FFCR));
+
etb_writel(etb, 0x0, ETB_CTL_REG);
for (count = TIMEOUT_US; BVAL(etb_readl(etb, ETB_FFSR), 1) != 1
&& count > 0; count--)
udelay(1);
- WARN(count == 0, "timeout while disabling etb\n");
+ WARN(count == 0, "timeout while disabling ETB, ETB_FFSR: %#x\n",
+ etb_readl(etb, ETB_FFSR));
ETB_LOCK();
}
void etb_disable(void)
{
- mutex_lock(&etb.lock);
+ mutex_lock(&etb.mutex);
__etb_disable();
etb.enabled = false;
- dev_info(etb.dev, "etb disabled\n");
- mutex_unlock(&etb.lock);
+ dev_info(etb.dev, "ETB disabled\n");
+ mutex_unlock(&etb.mutex);
}
static void __etb_dump(void)
@@ -186,15 +200,15 @@
void etb_dump(void)
{
- mutex_lock(&etb.lock);
+ mutex_lock(&etb.mutex);
if (etb.enabled) {
__etb_disable();
__etb_dump();
__etb_enable();
- dev_info(etb.dev, "etb dumped\n");
+ dev_info(etb.dev, "ETB dumped\n");
}
- mutex_unlock(&etb.lock);
+ mutex_unlock(&etb.mutex);
}
static int etb_open(struct inode *inode, struct file *file)
@@ -254,6 +268,62 @@
.fops = &etb_fops,
};
+#define ETB_ATTR(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+
+static ssize_t trigger_cntr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ etb.trigger_cntr = val;
+ return n;
+}
+static ssize_t trigger_cntr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val = etb.trigger_cntr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETB_ATTR(trigger_cntr);
+
+static int __init etb_sysfs_init(void)
+{
+ int ret;
+
+ etb.kobj = kobject_create_and_add("etb", qdss_get_modulekobj());
+ if (!etb.kobj) {
+ dev_err(etb.dev, "failed to create ETB sysfs kobject\n");
+ ret = -ENOMEM;
+ goto err_create;
+ }
+
+ ret = sysfs_create_file(etb.kobj, &trigger_cntr_attr.attr);
+ if (ret) {
+ dev_err(etb.dev, "failed to create ETB sysfs trigger_cntr"
+ " attribute\n");
+ goto err_file;
+ }
+
+ return 0;
+err_file:
+ kobject_put(etb.kobj);
+err_create:
+ return ret;
+}
+
+static void etb_sysfs_exit(void)
+{
+ sysfs_remove_file(etb.kobj, &trigger_cntr_attr.attr);
+ kobject_put(etb.kobj);
+}
+
static int __devinit etb_probe(struct platform_device *pdev)
{
int ret;
@@ -273,6 +343,8 @@
etb.dev = &pdev->dev;
+ mutex_init(&etb.mutex);
+
ret = misc_register(&etb_misc);
if (ret)
goto err_misc;
@@ -283,16 +355,19 @@
goto err_alloc;
}
- mutex_init(&etb.lock);
+ etb_sysfs_init();
+ dev_info(etb.dev, "ETB initialized\n");
return 0;
err_alloc:
misc_deregister(&etb_misc);
err_misc:
+ mutex_destroy(&etb.mutex);
iounmap(etb.base);
err_ioremap:
err_res:
+ dev_err(etb.dev, "ETB init failed\n");
return ret;
}
@@ -300,9 +375,10 @@
{
if (etb.enabled)
etb_disable();
- mutex_destroy(&etb.lock);
+ etb_sysfs_exit();
kfree(etb.buf);
misc_deregister(&etb_misc);
+ mutex_destroy(&etb.mutex);
iounmap(etb.base);
return 0;
diff --git a/arch/arm/mach-msm/qdss-etm.c b/arch/arm/mach-msm/qdss-etm.c
index c0dc58e..4bc3f495 100644
--- a/arch/arm/mach-msm/qdss-etm.c
+++ b/arch/arm/mach-msm/qdss-etm.c
@@ -19,21 +19,22 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/wakelock.h>
#include <linux/pm_qos_params.h>
-#include <asm/atomic.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <asm/sections.h>
+#include <mach/socinfo.h>
#include "qdss.h"
-#define ptm_writel(ptm, cpu, val, off) \
- __raw_writel((val), ptm.base + (SZ_4K * cpu) + off)
-#define ptm_readl(ptm, cpu, off) \
- __raw_readl(ptm.base + (SZ_4K * cpu) + off)
+#define etm_writel(etm, cpu, val, off) \
+ __raw_writel((val), etm.base + (SZ_4K * cpu) + off)
+#define etm_readl(etm, cpu, off) \
+ __raw_readl(etm.base + (SZ_4K * cpu) + off)
/*
* Device registers:
@@ -99,92 +100,120 @@
#define ETMPDCR (0x310)
#define ETMPDSR (0x314)
-#define PTM_LOCK(cpu) \
+#define ETM_MAX_ADDR_CMP (16)
+#define ETM_MAX_CNTR (4)
+#define ETM_MAX_CTXID_CMP (3)
+
+#define ETM_MODE_EXCLUDE BIT(0)
+#define ETM_MODE_CYCACC BIT(1)
+#define ETM_MODE_STALL BIT(2)
+#define ETM_MODE_TIMESTAMP BIT(3)
+#define ETM_MODE_CTXID BIT(4)
+#define ETM_MODE_ALL (0x1F)
+
+#define ETM_EVENT_MASK (0x1FFFF)
+#define ETM_SYNC_MASK (0xFFF)
+#define ETM_ALL_MASK (0xFFFFFFFF)
+
+#define ETM_SEQ_STATE_MAX_VAL (0x2)
+
+enum {
+ ETM_ADDR_TYPE_NONE,
+ ETM_ADDR_TYPE_SINGLE,
+ ETM_ADDR_TYPE_RANGE,
+ ETM_ADDR_TYPE_START,
+ ETM_ADDR_TYPE_STOP,
+};
+
+#define ETM_LOCK(cpu) \
do { \
mb(); \
- ptm_writel(ptm, cpu, 0x0, CS_LAR); \
+ etm_writel(etm, cpu, 0x0, CS_LAR); \
} while (0)
-#define PTM_UNLOCK(cpu) \
+#define ETM_UNLOCK(cpu) \
do { \
- ptm_writel(ptm, cpu, CS_UNLOCK_MAGIC, CS_LAR); \
+ etm_writel(etm, cpu, CS_UNLOCK_MAGIC, CS_LAR); \
mb(); \
} while (0)
-/* Forward declarations */
-static void ptm_cfg_rw_init(void);
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "qdss."
#ifdef CONFIG_MSM_QDSS_ETM_DEFAULT_ENABLE
-static int trace_on_boot = 1;
+static int etm_boot_enable = 1;
#else
-static int trace_on_boot;
+static int etm_boot_enable;
#endif
module_param_named(
- trace_on_boot, trace_on_boot, int, S_IRUGO
+ etm_boot_enable, etm_boot_enable, int, S_IRUGO
);
-struct ptm_config {
- /* read only config registers */
- uint32_t config_code;
- /* derived values */
- uint8_t nr_addr_comp;
- uint8_t nr_cntr;
- uint8_t nr_ext_input;
- uint8_t nr_ext_output;
- uint8_t nr_context_id_comp;
-
- uint32_t config_code_extn;
- /* derived values */
- uint8_t nr_extnd_ext_input_sel;
- uint8_t nr_instr_resources;
-
- uint32_t system_config;
- /* derived values */
- uint8_t fifofull_supported;
- uint8_t nr_procs_supported;
-
- /* read-write registers */
- uint32_t main_control;
- uint32_t trigger_event;
- uint32_t te_start_stop_control;
- uint32_t te_event;
- uint32_t te_control;
- uint32_t fifofull_level;
- uint32_t addr_comp_value[16];
- uint32_t addr_comp_access_type[16];
- uint32_t cntr_reload_value[4];
- uint32_t cntr_enable_event[4];
- uint32_t cntr_reload_event[4];
- uint32_t cntr_value[4];
- uint32_t seq_state_12_event;
- uint32_t seq_state_21_event;
- uint32_t seq_state_23_event;
- uint32_t seq_state_32_event;
- uint32_t seq_state_13_event;
- uint32_t seq_state_31_event;
- uint32_t current_seq_state;
- uint32_t ext_output_event[4];
- uint32_t context_id_comp_value[3];
- uint32_t context_id_comp_mask;
- uint32_t sync_freq;
- uint32_t extnd_ext_input_sel;
- uint32_t ts_event;
- uint32_t aux_control;
- uint32_t coresight_trace_id;
- uint32_t vmid_comp_value;
-};
-
-struct ptm_ctx {
- struct ptm_config cfg;
+struct etm_ctx {
void __iomem *base;
- bool trace_enabled;
+ bool enabled;
struct wake_lock wake_lock;
struct pm_qos_request_list qos_req;
- atomic_t in_use;
+ struct mutex mutex;
struct device *dev;
+ struct kobject *kobj;
+ uint8_t arch;
+ uint8_t nr_addr_cmp;
+ uint8_t nr_cntr;
+ uint8_t nr_ext_inp;
+ uint8_t nr_ext_out;
+ uint8_t nr_ctxid_cmp;
+ uint8_t reset;
+ uint32_t mode;
+ uint32_t ctrl;
+ uint32_t trigger_event;
+ uint32_t startstop_ctrl;
+ uint32_t enable_event;
+ uint32_t enable_ctrl1;
+ uint32_t fifofull_level;
+ uint8_t addr_idx;
+ uint32_t addr_val[ETM_MAX_ADDR_CMP];
+ uint32_t addr_acctype[ETM_MAX_ADDR_CMP];
+ uint32_t addr_type[ETM_MAX_ADDR_CMP];
+ uint8_t cntr_idx;
+ uint32_t cntr_rld_val[ETM_MAX_CNTR];
+ uint32_t cntr_event[ETM_MAX_CNTR];
+ uint32_t cntr_rld_event[ETM_MAX_CNTR];
+ uint32_t cntr_val[ETM_MAX_CNTR];
+ uint32_t seq_12_event;
+ uint32_t seq_21_event;
+ uint32_t seq_23_event;
+ uint32_t seq_31_event;
+ uint32_t seq_32_event;
+ uint32_t seq_13_event;
+ uint32_t seq_curr_state;
+ uint8_t ctxid_idx;
+ uint32_t ctxid_val[ETM_MAX_CTXID_CMP];
+ uint32_t ctxid_mask;
+ uint32_t sync_freq;
+ uint32_t timestamp_event;
};
-static struct ptm_ctx ptm;
+static struct etm_ctx etm = {
+ .trigger_event = 0x406F,
+ .enable_event = 0x6F,
+ .enable_ctrl1 = 0x1,
+ .fifofull_level = 0x28,
+ .addr_val = {(uint32_t) _stext, (uint32_t) _etext},
+ .addr_type = {ETM_ADDR_TYPE_RANGE, ETM_ADDR_TYPE_RANGE},
+ .cntr_event = {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
+ .cntr_rld_event = {[0 ... (ETM_MAX_CNTR - 1)] = 0x406F},
+ .seq_12_event = 0x406F,
+ .seq_21_event = 0x406F,
+ .seq_23_event = 0x406F,
+ .seq_31_event = 0x406F,
+ .seq_32_event = 0x406F,
+ .seq_13_event = 0x406F,
+ .sync_freq = 0x80,
+ .timestamp_event = 0x406F,
+};
/* ETM clock is derived from the processor clock and gets enabled on a
@@ -202,117 +231,119 @@
* clock vote in the driver and the save-restore code uses 1. above
* for its vote
*/
-static void ptm_set_powerdown(int cpu)
+static void etm_set_pwrdwn(int cpu)
{
uint32_t etmcr;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr |= BIT(0);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
}
-static void ptm_clear_powerdown(int cpu)
+static void etm_clr_pwrdwn(int cpu)
{
uint32_t etmcr;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr &= ~BIT(0);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
}
-static void ptm_set_prog(int cpu)
+static void etm_set_prog(int cpu)
{
uint32_t etmcr;
int count;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr |= BIT(10);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
- for (count = TIMEOUT_US; BVAL(ptm_readl(ptm, cpu, ETMSR), 1) != 1
+ for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 1
&& count > 0; count--)
udelay(1);
- WARN(count == 0, "timeout while setting prog bit\n");
+ WARN(count == 0, "timeout while setting prog bit, ETMSR: %#x\n",
+ etm_readl(etm, cpu, ETMSR));
}
-static void ptm_clear_prog(int cpu)
+static void etm_clr_prog(int cpu)
{
uint32_t etmcr;
int count;
- etmcr = ptm_readl(ptm, cpu, ETMCR);
+ etmcr = etm_readl(etm, cpu, ETMCR);
etmcr &= ~BIT(10);
- ptm_writel(ptm, cpu, etmcr, ETMCR);
+ etm_writel(etm, cpu, etmcr, ETMCR);
- for (count = TIMEOUT_US; BVAL(ptm_readl(ptm, cpu, ETMSR), 1) != 0
+ for (count = TIMEOUT_US; BVAL(etm_readl(etm, cpu, ETMSR), 1) != 0
&& count > 0; count--)
udelay(1);
- WARN(count == 0, "timeout while clearing prog bit\n");
+ WARN(count == 0, "timeout while clearing prog bit, ETMSR: %#x\n",
+ etm_readl(etm, cpu, ETMSR));
}
-static void __ptm_trace_enable(int cpu)
+static void __etm_enable(int cpu)
{
int i;
- PTM_UNLOCK(cpu);
+ ETM_UNLOCK(cpu);
/* Vote for ETM power/clock enable */
- ptm_clear_powerdown(cpu);
- ptm_set_prog(cpu);
+ etm_clr_pwrdwn(cpu);
+ etm_set_prog(cpu);
- ptm_writel(ptm, cpu, ptm.cfg.main_control | BIT(10), ETMCR);
- ptm_writel(ptm, cpu, ptm.cfg.trigger_event, ETMTRIGGER);
- ptm_writel(ptm, cpu, ptm.cfg.te_start_stop_control, ETMTSSCR);
- ptm_writel(ptm, cpu, ptm.cfg.te_event, ETMTEEVR);
- ptm_writel(ptm, cpu, ptm.cfg.te_control, ETMTECR1);
- ptm_writel(ptm, cpu, ptm.cfg.fifofull_level, ETMFFLR);
- for (i = 0; i < ptm.cfg.nr_addr_comp; i++) {
- ptm_writel(ptm, cpu, ptm.cfg.addr_comp_value[i], ETMACVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.addr_comp_access_type[i],
- ETMACTRn(i));
+ etm_writel(etm, cpu, etm.ctrl | BIT(10), ETMCR);
+ etm_writel(etm, cpu, etm.trigger_event, ETMTRIGGER);
+ etm_writel(etm, cpu, etm.startstop_ctrl, ETMTSSCR);
+ etm_writel(etm, cpu, etm.enable_event, ETMTEEVR);
+ etm_writel(etm, cpu, etm.enable_ctrl1, ETMTECR1);
+ etm_writel(etm, cpu, etm.fifofull_level, ETMFFLR);
+ for (i = 0; i < etm.nr_addr_cmp; i++) {
+ etm_writel(etm, cpu, etm.addr_val[i], ETMACVRn(i));
+ etm_writel(etm, cpu, etm.addr_acctype[i], ETMACTRn(i));
}
- for (i = 0; i < ptm.cfg.nr_cntr; i++) {
- ptm_writel(ptm, cpu, ptm.cfg.cntr_reload_value[i],
- ETMCNTRLDVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.cntr_enable_event[i],
- ETMCNTENRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.cntr_reload_event[i],
- ETMCNTRLDEVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.cntr_value[i], ETMCNTVRn(i));
+ for (i = 0; i < etm.nr_cntr; i++) {
+ etm_writel(etm, cpu, etm.cntr_rld_val[i], ETMCNTRLDVRn(i));
+ etm_writel(etm, cpu, etm.cntr_event[i], ETMCNTENRn(i));
+ etm_writel(etm, cpu, etm.cntr_rld_event[i], ETMCNTRLDEVRn(i));
+ etm_writel(etm, cpu, etm.cntr_val[i], ETMCNTVRn(i));
}
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_12_event, ETMSQ12EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_21_event, ETMSQ21EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_23_event, ETMSQ23EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_32_event, ETMSQ32EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_13_event, ETMSQ13EVR);
- ptm_writel(ptm, cpu, ptm.cfg.seq_state_31_event, ETMSQ31EVR);
- ptm_writel(ptm, cpu, ptm.cfg.current_seq_state, ETMSQR);
- for (i = 0; i < ptm.cfg.nr_ext_output; i++)
- ptm_writel(ptm, cpu, ptm.cfg.ext_output_event[i],
- ETMEXTOUTEVRn(i));
- for (i = 0; i < ptm.cfg.nr_context_id_comp; i++)
- ptm_writel(ptm, cpu, ptm.cfg.context_id_comp_value[i],
- ETMCIDCVRn(i));
- ptm_writel(ptm, cpu, ptm.cfg.context_id_comp_mask, ETMCIDCMR);
- ptm_writel(ptm, cpu, ptm.cfg.sync_freq, ETMSYNCFR);
- ptm_writel(ptm, cpu, ptm.cfg.extnd_ext_input_sel, ETMEXTINSELR);
- ptm_writel(ptm, cpu, ptm.cfg.ts_event, ETMTSEVR);
- ptm_writel(ptm, cpu, ptm.cfg.aux_control, ETMAUXCR);
- ptm_writel(ptm, cpu, cpu+1, ETMTRACEIDR);
- ptm_writel(ptm, cpu, ptm.cfg.vmid_comp_value, ETMVMIDCVR);
+ etm_writel(etm, cpu, etm.seq_12_event, ETMSQ12EVR);
+ etm_writel(etm, cpu, etm.seq_21_event, ETMSQ21EVR);
+ etm_writel(etm, cpu, etm.seq_23_event, ETMSQ23EVR);
+ etm_writel(etm, cpu, etm.seq_31_event, ETMSQ31EVR);
+ etm_writel(etm, cpu, etm.seq_32_event, ETMSQ32EVR);
+ etm_writel(etm, cpu, etm.seq_13_event, ETMSQ13EVR);
+ etm_writel(etm, cpu, etm.seq_curr_state, ETMSQR);
+ for (i = 0; i < etm.nr_ext_out; i++)
+ etm_writel(etm, cpu, 0x0000406F, ETMEXTOUTEVRn(i));
+ for (i = 0; i < etm.nr_ctxid_cmp; i++)
+ etm_writel(etm, cpu, etm.ctxid_val[i], ETMCIDCVRn(i));
+ etm_writel(etm, cpu, etm.ctxid_mask, ETMCIDCMR);
+ etm_writel(etm, cpu, etm.sync_freq, ETMSYNCFR);
+ etm_writel(etm, cpu, 0x00000000, ETMEXTINSELR);
+ etm_writel(etm, cpu, etm.timestamp_event, ETMTSEVR);
+ etm_writel(etm, cpu, 0x00000000, ETMAUXCR);
+ etm_writel(etm, cpu, cpu+1, ETMTRACEIDR);
+ etm_writel(etm, cpu, 0x00000000, ETMVMIDCVR);
- ptm_clear_prog(cpu);
- PTM_LOCK(cpu);
+ etm_clr_prog(cpu);
+ ETM_LOCK(cpu);
}
-static int ptm_trace_enable(void)
+static int etm_enable(void)
{
int ret, cpu;
+ if (etm.enabled) {
+ dev_err(etm.dev, "ETM tracing already enabled\n");
+ ret = -EPERM;
+ goto err;
+ }
+
ret = qdss_clk_enable();
if (ret)
- return ret;
+ goto err;
- wake_lock(&ptm.wake_lock);
+ wake_lock(&etm.wake_lock);
/* 1. causes all online cpus to come out of idle PC
* 2. prevents idle PC until save restore flag is enabled atomically
*
@@ -320,7 +351,7 @@
* operation and to ensure cores where trace is expected to be turned
* on are already hotplugged on
*/
- pm_qos_update_request(&ptm.qos_req, 0);
+ pm_qos_update_request(&etm.qos_req, 0);
etb_disable();
tpiu_disable();
@@ -328,34 +359,43 @@
etb_enable();
funnel_enable(0x0, 0x3);
for_each_online_cpu(cpu)
- __ptm_trace_enable(cpu);
+ __etm_enable(cpu);
- ptm.trace_enabled = true;
+ etm.enabled = true;
- pm_qos_update_request(&ptm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&ptm.wake_lock);
+ pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&etm.wake_lock);
+ dev_info(etm.dev, "ETM tracing enabled\n");
return 0;
+err:
+ return ret;
}
-static void __ptm_trace_disable(int cpu)
+static void __etm_disable(int cpu)
{
- PTM_UNLOCK(cpu);
- ptm_set_prog(cpu);
+ ETM_UNLOCK(cpu);
+ etm_set_prog(cpu);
/* program trace enable to low by using always false event */
- ptm_writel(ptm, cpu, 0x6F | BIT(14), ETMTEEVR);
+ etm_writel(etm, cpu, 0x6F | BIT(14), ETMTEEVR);
/* Vote for ETM power/clock disable */
- ptm_set_powerdown(cpu);
- PTM_LOCK(cpu);
+ etm_set_pwrdwn(cpu);
+ ETM_LOCK(cpu);
}
-static void ptm_trace_disable(void)
+static int etm_disable(void)
{
- int cpu;
+ int ret, cpu;
- wake_lock(&ptm.wake_lock);
+ if (!etm.enabled) {
+ dev_err(etm.dev, "ETM tracing already disabled\n");
+ ret = -EPERM;
+ goto err;
+ }
+
+ wake_lock(&etm.wake_lock);
/* 1. causes all online cpus to come out of idle PC
* 2. prevents idle PC until save restore flag is disabled atomically
*
@@ -363,219 +403,29 @@
* operation and to ensure cores where trace is expected to be turned
* off are already hotplugged on
*/
- pm_qos_update_request(&ptm.qos_req, 0);
+ pm_qos_update_request(&etm.qos_req, 0);
for_each_online_cpu(cpu)
- __ptm_trace_disable(cpu);
+ __etm_disable(cpu);
etb_dump();
etb_disable();
funnel_disable(0x0, 0x3);
- ptm.trace_enabled = false;
+ etm.enabled = false;
- pm_qos_update_request(&ptm.qos_req, PM_QOS_DEFAULT_VALUE);
- wake_unlock(&ptm.wake_lock);
+ pm_qos_update_request(&etm.qos_req, PM_QOS_DEFAULT_VALUE);
+ wake_unlock(&etm.wake_lock);
qdss_clk_disable();
-}
-static int ptm_open(struct inode *inode, struct file *file)
-{
- if (atomic_cmpxchg(&ptm.in_use, 0, 1))
- return -EBUSY;
-
- dev_dbg(ptm.dev, "%s: successfully opened\n", __func__);
+ dev_info(etm.dev, "ETM tracing disabled\n");
return 0;
-}
-
-static void ptm_range_filter(char range, uint32_t reg1,
- uint32_t addr1, uint32_t reg2, uint32_t addr2)
-{
- ptm.cfg.addr_comp_value[reg1] = addr1;
- ptm.cfg.addr_comp_value[reg2] = addr2;
-
- ptm.cfg.te_control |= (1 << (reg1/2));
- if (range == 'i')
- ptm.cfg.te_control &= ~BIT(24);
- else if (range == 'e')
- ptm.cfg.te_control |= BIT(24);
-}
-
-static void ptm_start_stop_filter(char start_stop,
- uint32_t reg, uint32_t addr)
-{
- ptm.cfg.addr_comp_value[reg] = addr;
-
- if (start_stop == 's')
- ptm.cfg.te_start_stop_control |= (1 << reg);
- else if (start_stop == 't')
- ptm.cfg.te_start_stop_control |= (1 << (reg + 16));
-
- ptm.cfg.te_control |= BIT(25);
-}
-
-#define MAX_COMMAND_STRLEN 40
-static ssize_t ptm_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- char command[MAX_COMMAND_STRLEN];
- int str_len;
- unsigned long reg1, reg2;
- unsigned long addr1, addr2;
-
- str_len = strnlen_user(data, MAX_COMMAND_STRLEN);
- dev_dbg(ptm.dev, "string length: %d", str_len);
- if (str_len == 0 || str_len == (MAX_COMMAND_STRLEN+1)) {
- dev_err(ptm.dev, "error in str_len: %d", str_len);
- return -EFAULT;
- }
- /* includes the null character */
- if (copy_from_user(command, data, str_len)) {
- dev_err(ptm.dev, "error in copy_from_user: %d", str_len);
- return -EFAULT;
- }
-
- dev_dbg(ptm.dev, "input = %s", command);
-
- switch (command[0]) {
- case '0':
- if (ptm.trace_enabled) {
- ptm_trace_disable();
- dev_info(ptm.dev, "tracing disabled\n");
- } else
- dev_err(ptm.dev, "trace already disabled\n");
-
- break;
- case '1':
- if (!ptm.trace_enabled) {
- if (!ptm_trace_enable())
- dev_info(ptm.dev, "tracing enabled\n");
- else
- dev_err(ptm.dev, "error enabling trace\n");
- } else
- dev_err(ptm.dev, "trace already enabled\n");
- break;
- case 'f':
- switch (command[2]) {
- case 'i':
- switch (command[4]) {
- case 'i':
- if (sscanf(&command[6], "%lx:%lx:%lx:%lx\\0",
- &reg1, &addr1, &reg2, &addr2) != 4)
- goto err_out;
- if (reg1 > 7 || reg2 > 7 || (reg1 % 2))
- goto err_out;
- ptm_range_filter('i',
- reg1, addr1, reg2, addr2);
- break;
- case 'e':
- if (sscanf(&command[6], "%lx:%lx:%lx:%lx\\0",
- &reg1, &addr1, &reg2, &addr2) != 4)
- goto err_out;
- if (reg1 > 7 || reg2 > 7 || (reg1 % 2)
- || command[2] == 'd')
- goto err_out;
- ptm_range_filter('e',
- reg1, addr1, reg2, addr2);
- break;
- case 's':
- if (sscanf(&command[6], "%lx:%lx\\0",
- &reg1, &addr1) != 2)
- goto err_out;
- if (reg1 > 7)
- goto err_out;
- ptm_start_stop_filter('s', reg1, addr1);
- break;
- case 't':
- if (sscanf(&command[6], "%lx:%lx\\0",
- &reg1, &addr1) != 2)
- goto err_out;
- if (reg1 > 7)
- goto err_out;
- ptm_start_stop_filter('t', reg1, addr1);
- break;
- default:
- goto err_out;
- }
- break;
- case 'r':
- ptm_cfg_rw_init();
- break;
- default:
- goto err_out;
- }
- break;
- default:
- goto err_out;
- }
-
- return len;
-
-err_out:
- return -EFAULT;
-}
-
-static int ptm_release(struct inode *inode, struct file *file)
-{
- atomic_set(&ptm.in_use, 0);
- dev_dbg(ptm.dev, "%s: released\n", __func__);
- return 0;
-}
-
-static const struct file_operations ptm_fops = {
- .owner = THIS_MODULE,
- .open = ptm_open,
- .write = ptm_write,
- .release = ptm_release,
-};
-
-static struct miscdevice ptm_misc = {
- .name = "msm_ptm",
- .minor = MISC_DYNAMIC_MINOR,
- .fops = &ptm_fops,
-};
-
-static void ptm_cfg_rw_init(void)
-{
- int i;
-
- ptm.cfg.main_control = 0x00001000;
- ptm.cfg.trigger_event = 0x0000406F;
- ptm.cfg.te_start_stop_control = 0x00000000;
- ptm.cfg.te_event = 0x0000006F;
- ptm.cfg.te_control = 0x01000000;
- ptm.cfg.fifofull_level = 0x00000028;
- for (i = 0; i < ptm.cfg.nr_addr_comp; i++) {
- ptm.cfg.addr_comp_value[i] = 0x00000000;
- ptm.cfg.addr_comp_access_type[i] = 0x00000000;
- }
- for (i = 0; i < ptm.cfg.nr_cntr; i++) {
- ptm.cfg.cntr_reload_value[i] = 0x00000000;
- ptm.cfg.cntr_enable_event[i] = 0x0000406F;
- ptm.cfg.cntr_reload_event[i] = 0x0000406F;
- ptm.cfg.cntr_value[i] = 0x00000000;
- }
- ptm.cfg.seq_state_12_event = 0x0000406F;
- ptm.cfg.seq_state_21_event = 0x0000406F;
- ptm.cfg.seq_state_23_event = 0x0000406F;
- ptm.cfg.seq_state_32_event = 0x0000406F;
- ptm.cfg.seq_state_13_event = 0x0000406F;
- ptm.cfg.seq_state_31_event = 0x0000406F;
- ptm.cfg.current_seq_state = 0x00000000;
- for (i = 0; i < ptm.cfg.nr_ext_output; i++)
- ptm.cfg.ext_output_event[i] = 0x0000406F;
- for (i = 0; i < ptm.cfg.nr_context_id_comp; i++)
- ptm.cfg.context_id_comp_value[i] = 0x00000000;
- ptm.cfg.context_id_comp_mask = 0x00000000;
- ptm.cfg.sync_freq = 0x00000080;
- ptm.cfg.extnd_ext_input_sel = 0x00000000;
- ptm.cfg.ts_event = 0x0000406F;
- ptm.cfg.aux_control = 0x00000000;
- ptm.cfg.vmid_comp_value = 0x00000000;
+err:
+ return ret;
}
/* Memory mapped writes to clear os lock not supported */
-static void ptm_os_unlock(void *unused)
+static void etm_os_unlock(void *unused)
{
unsigned long value = 0x0;
@@ -583,42 +433,800 @@
asm("isb\n\t");
}
-static void ptm_cfg_ro_init(void)
-{
- /* use cpu 0 for setup */
- int cpu = 0;
-
- /* Unlock OS lock first to allow memory mapped reads and writes */
- ptm_os_unlock(NULL);
- smp_call_function(ptm_os_unlock, NULL, 1);
- PTM_UNLOCK(cpu);
- /* Vote for ETM power/clock enable */
- ptm_clear_powerdown(cpu);
- ptm_set_prog(cpu);
-
- /* find all capabilities */
- ptm.cfg.config_code = ptm_readl(ptm, cpu, ETMCCR);
- ptm.cfg.nr_addr_comp = BMVAL(ptm.cfg.config_code, 0, 3) * 2;
- ptm.cfg.nr_cntr = BMVAL(ptm.cfg.config_code, 13, 15);
- ptm.cfg.nr_ext_input = BMVAL(ptm.cfg.config_code, 17, 19);
- ptm.cfg.nr_ext_output = BMVAL(ptm.cfg.config_code, 20, 22);
- ptm.cfg.nr_context_id_comp = BMVAL(ptm.cfg.config_code, 24, 25);
-
- ptm.cfg.config_code_extn = ptm_readl(ptm, cpu, ETMCCER);
- ptm.cfg.nr_extnd_ext_input_sel =
- BMVAL(ptm.cfg.config_code_extn, 0, 2);
- ptm.cfg.nr_instr_resources = BMVAL(ptm.cfg.config_code_extn, 13, 15);
-
- ptm.cfg.system_config = ptm_readl(ptm, cpu, ETMSCR);
- ptm.cfg.fifofull_supported = BVAL(ptm.cfg.system_config, 8);
- ptm.cfg.nr_procs_supported = BMVAL(ptm.cfg.system_config, 12, 14);
-
- /* Vote for ETM power/clock disable */
- ptm_set_powerdown(cpu);
- PTM_LOCK(cpu);
+#define ETM_STORE(__name, mask) \
+static ssize_t __name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t n) \
+{ \
+ unsigned long val; \
+ \
+ if (sscanf(buf, "%lx", &val) != 1) \
+ return -EINVAL; \
+ \
+ etm.__name = val & mask; \
+ return n; \
}
-static int __devinit ptm_probe(struct platform_device *pdev)
+#define ETM_SHOW(__name) \
+static ssize_t __name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned long val = etm.__name; \
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); \
+}
+
+#define ETM_ATTR(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+#define ETM_ATTR_RO(__name) \
+static struct kobj_attribute __name##_attr = \
+ __ATTR(__name, S_IRUGO, __name##_show, NULL)
+
+static ssize_t enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret = 0;
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ if (val)
+ ret = etm_enable();
+ else
+ ret = etm_disable();
+ mutex_unlock(&etm.mutex);
+
+ if (ret)
+ return ret;
+ return n;
+}
+ETM_SHOW(enabled);
+ETM_ATTR(enabled);
+
+ETM_SHOW(nr_addr_cmp);
+ETM_ATTR_RO(nr_addr_cmp);
+ETM_SHOW(nr_cntr);
+ETM_ATTR_RO(nr_cntr);
+ETM_SHOW(nr_ctxid_cmp);
+ETM_ATTR_RO(nr_ctxid_cmp);
+
+/* Reset to trace everything i.e. exclude nothing. */
+static ssize_t reset_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ int i;
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ if (val) {
+ etm.mode = ETM_MODE_EXCLUDE;
+ etm.ctrl = 0x0;
+ if (cpu_is_krait_v1()) {
+ etm.mode |= ETM_MODE_CYCACC;
+ etm.ctrl |= BIT(12);
+ }
+ etm.trigger_event = 0x406F;
+ etm.startstop_ctrl = 0x0;
+ etm.enable_event = 0x6F;
+ etm.enable_ctrl1 = 0x1000000;
+ etm.fifofull_level = 0x28;
+ etm.addr_idx = 0x0;
+ for (i = 0; i < etm.nr_addr_cmp; i++) {
+ etm.addr_val[i] = 0x0;
+ etm.addr_acctype[i] = 0x0;
+ etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+ etm.cntr_idx = 0x0;
+ for (i = 0; i < etm.nr_cntr; i++) {
+ etm.cntr_rld_val[i] = 0x0;
+ etm.cntr_event[i] = 0x406F;
+ etm.cntr_rld_event[i] = 0x406F;
+ etm.cntr_val[i] = 0x0;
+ }
+ etm.seq_12_event = 0x406F;
+ etm.seq_21_event = 0x406F;
+ etm.seq_23_event = 0x406F;
+ etm.seq_31_event = 0x406F;
+ etm.seq_32_event = 0x406F;
+ etm.seq_13_event = 0x406F;
+ etm.seq_curr_state = 0x0;
+ etm.ctxid_idx = 0x0;
+ for (i = 0; i < etm.nr_ctxid_cmp; i++)
+ etm.ctxid_val[i] = 0x0;
+ etm.ctxid_mask = 0x0;
+ etm.sync_freq = 0x80;
+ etm.timestamp_event = 0x406F;
+ }
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(reset);
+ETM_ATTR(reset);
+
+static ssize_t mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.mode = val & ETM_MODE_ALL;
+
+ if (etm.mode & ETM_MODE_EXCLUDE)
+ etm.enable_ctrl1 |= BIT(24);
+ else
+ etm.enable_ctrl1 &= ~BIT(24);
+
+ if (etm.mode & ETM_MODE_CYCACC)
+ etm.ctrl |= BIT(12);
+ else
+ etm.ctrl &= ~BIT(12);
+
+ if (etm.mode & ETM_MODE_STALL)
+ etm.ctrl |= BIT(7);
+ else
+ etm.ctrl &= ~BIT(7);
+
+ if (etm.mode & ETM_MODE_TIMESTAMP)
+ etm.ctrl |= BIT(28);
+ else
+ etm.ctrl &= ~BIT(28);
+ if (etm.mode & ETM_MODE_CTXID)
+ etm.ctrl |= (BIT(14) | BIT(15));
+ else
+ etm.ctrl &= ~(BIT(14) | BIT(15));
+ mutex_unlock(&etm.mutex);
+
+ return n;
+}
+ETM_SHOW(mode);
+ETM_ATTR(mode);
+
+ETM_STORE(trigger_event, ETM_EVENT_MASK);
+ETM_SHOW(trigger_event);
+ETM_ATTR(trigger_event);
+
+ETM_STORE(enable_event, ETM_EVENT_MASK);
+ETM_SHOW(enable_event);
+ETM_ATTR(enable_event);
+
+ETM_STORE(fifofull_level, ETM_ALL_MASK);
+ETM_SHOW(fifofull_level);
+ETM_ATTR(fifofull_level);
+
+static ssize_t addr_idx_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= etm.nr_addr_cmp)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&etm.mutex);
+ etm.addr_idx = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(addr_idx);
+ETM_ATTR(addr_idx);
+
+static ssize_t addr_single_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_single_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val = etm.addr_val[idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_single);
+
+static ssize_t addr_range_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val1, val2;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (idx % 2 != 0) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+ if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val1;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ etm.addr_val[idx + 1] = val2;
+ etm.addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ etm.enable_ctrl1 |= (1 << (idx/2));
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_range_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val1, val2;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (idx % 2 != 0) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+ if (!((etm.addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (etm.addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ etm.addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val1 = etm.addr_val[idx];
+ val2 = etm.addr_val[idx + 1];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+ETM_ATTR(addr_range);
+
+static ssize_t addr_start_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_START;
+ etm.startstop_ctrl |= (1 << idx);
+ etm.enable_ctrl1 |= BIT(25);
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_start_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val = etm.addr_val[idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_start);
+
+static ssize_t addr_stop_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ etm.addr_val[idx] = val;
+ etm.addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ etm.startstop_ctrl |= (1 << (idx + 16));
+ etm.enable_ctrl1 |= BIT(25);
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_stop_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ uint8_t idx;
+
+ mutex_lock(&etm.mutex);
+ idx = etm.addr_idx;
+ if (!(etm.addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ etm.addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ mutex_unlock(&etm.mutex);
+ return -EPERM;
+ }
+
+ val = etm.addr_val[idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_stop);
+
+static ssize_t addr_acctype_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.addr_acctype[etm.addr_idx] = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t addr_acctype_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.addr_acctype[etm.addr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(addr_acctype);
+
+static ssize_t cntr_idx_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+ if (val >= etm.nr_cntr)
+ return -EINVAL;
+
+ /* Use mutex to ensure index doesn't change while it gets dereferenced
+ * multiple times within a mutex block elsewhere.
+ */
+ mutex_lock(&etm.mutex);
+ etm.cntr_idx = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+ETM_SHOW(cntr_idx);
+ETM_ATTR(cntr_idx);
+
+static ssize_t cntr_rld_val_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_rld_val[etm.cntr_idx] = val;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_rld_val_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_rld_val[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_rld_val);
+
+static ssize_t cntr_event_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_event[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_event);
+
+static ssize_t cntr_rld_event_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (sscanf(buf, "%lx", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&etm.mutex);
+ etm.cntr_rld_event[etm.cntr_idx] = val & ETM_EVENT_MASK;
+ mutex_unlock(&etm.mutex);
+ return n;
+}
+static ssize_t cntr_rld_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+
+ mutex_lock(&etm.mutex);
+ val = etm.cntr_rld_event[etm.cntr_idx];
+ mutex_unlock(&etm.mutex);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+ETM_ATTR(cntr_rld_event);
+
+/*
+ * cntr_val: current value of the counter selected by cntr_idx.
+ * Stored unmasked; parsed and printed as hex.
+ */
+static ssize_t cntr_val_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long cntr_val;
+
+	if (sscanf(buf, "%lx", &cntr_val) != 1)
+		return -EINVAL;
+
+	mutex_lock(&etm.mutex);
+	etm.cntr_val[etm.cntr_idx] = cntr_val;
+	mutex_unlock(&etm.mutex);
+	return n;
+}
+static ssize_t cntr_val_show(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			char *buf)
+{
+	unsigned long cntr_val;
+
+	mutex_lock(&etm.mutex);
+	cntr_val = etm.cntr_val[etm.cntr_idx];
+	mutex_unlock(&etm.mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", cntr_val);
+}
+ETM_ATTR(cntr_val);
+
+/*
+ * Sequencer state-transition events: seq_xy_event is the event that moves
+ * the ETM sequencer from state x to state y.  All six are plain scalars
+ * masked with ETM_EVENT_MASK, so the generic ETM_STORE/ETM_SHOW/ETM_ATTR
+ * helpers provide the sysfs accessors.
+ */
+ETM_STORE(seq_12_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_12_event);
+ETM_ATTR(seq_12_event);
+
+ETM_STORE(seq_21_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_21_event);
+ETM_ATTR(seq_21_event);
+
+ETM_STORE(seq_23_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_23_event);
+ETM_ATTR(seq_23_event);
+
+ETM_STORE(seq_31_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_31_event);
+ETM_ATTR(seq_31_event);
+
+ETM_STORE(seq_32_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_32_event);
+ETM_ATTR(seq_32_event);
+
+ETM_STORE(seq_13_event, ETM_EVENT_MASK);
+ETM_SHOW(seq_13_event);
+ETM_ATTR(seq_13_event);
+
+/*
+ * seq_curr_state: current sequencer state, rejected if above
+ * ETM_SEQ_STATE_MAX_VAL.
+ * NOTE(review): stored without etm.mutex held, unlike the indexed
+ * attributes above — presumably safe for a single scalar; confirm.
+ */
+static ssize_t seq_curr_state_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long state;
+
+	if (sscanf(buf, "%lx", &state) != 1)
+		return -EINVAL;
+	if (state > ETM_SEQ_STATE_MAX_VAL)
+		return -EINVAL;
+
+	etm.seq_curr_state = state;
+	return n;
+}
+ETM_SHOW(seq_curr_state);
+ETM_ATTR(seq_curr_state);
+
+/*
+ * ctxid_idx: selects which context-ID comparator the ctxid_* attributes
+ * operate on; must be below the nr_ctxid_cmp read from ETMCCR.
+ */
+static ssize_t ctxid_idx_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long idx;
+
+	if (sscanf(buf, "%lx", &idx) != 1)
+		return -EINVAL;
+	if (idx >= etm.nr_ctxid_cmp)
+		return -EINVAL;
+
+	/* Use mutex to ensure index doesn't change while it gets dereferenced
+	 * multiple times within a mutex block elsewhere.
+	 */
+	mutex_lock(&etm.mutex);
+	etm.ctxid_idx = idx;
+	mutex_unlock(&etm.mutex);
+	return n;
+}
+ETM_SHOW(ctxid_idx);
+ETM_ATTR(ctxid_idx);
+
+/*
+ * ctxid_val: context-ID comparator value for the comparator selected by
+ * ctxid_idx.  The mutex pins ctxid_idx across the array access.
+ */
+static ssize_t ctxid_val_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long ctxid;
+
+	if (sscanf(buf, "%lx", &ctxid) != 1)
+		return -EINVAL;
+
+	mutex_lock(&etm.mutex);
+	etm.ctxid_val[etm.ctxid_idx] = ctxid;
+	mutex_unlock(&etm.mutex);
+	return n;
+}
+static ssize_t ctxid_val_show(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			char *buf)
+{
+	unsigned long ctxid;
+
+	mutex_lock(&etm.mutex);
+	ctxid = etm.ctxid_val[etm.ctxid_idx];
+	mutex_unlock(&etm.mutex);
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", ctxid);
+}
+ETM_ATTR(ctxid_val);
+
+/*
+ * Remaining scalar attributes, again via the generic helpers:
+ *  - ctxid_mask:      mask applied to context-ID comparisons (full width),
+ *  - sync_freq:       synchronization packet frequency (ETM_SYNC_MASK),
+ *  - timestamp_event: event that inserts timestamps (ETM_EVENT_MASK).
+ */
+ETM_STORE(ctxid_mask, ETM_ALL_MASK);
+ETM_SHOW(ctxid_mask);
+ETM_ATTR(ctxid_mask);
+
+ETM_STORE(sync_freq, ETM_SYNC_MASK);
+ETM_SHOW(sync_freq);
+ETM_ATTR(sync_freq);
+
+ETM_STORE(timestamp_event, ETM_EVENT_MASK);
+ETM_SHOW(timestamp_event);
+ETM_ATTR(timestamp_event);
+
+/*
+ * Every configuration attribute defined above, gathered into one
+ * attribute group so etm_sysfs_init() can create/remove them as a unit.
+ * The "enabled" attribute is intentionally not part of this group — it
+ * is created individually and must exist even if the group fails.
+ */
+static struct attribute *etm_attrs[] = {
+	&nr_addr_cmp_attr.attr,
+	&nr_cntr_attr.attr,
+	&nr_ctxid_cmp_attr.attr,
+	&reset_attr.attr,
+	&mode_attr.attr,
+	&trigger_event_attr.attr,
+	&enable_event_attr.attr,
+	&fifofull_level_attr.attr,
+	&addr_idx_attr.attr,
+	&addr_single_attr.attr,
+	&addr_range_attr.attr,
+	&addr_start_attr.attr,
+	&addr_stop_attr.attr,
+	&addr_acctype_attr.attr,
+	&cntr_idx_attr.attr,
+	&cntr_rld_val_attr.attr,
+	&cntr_event_attr.attr,
+	&cntr_rld_event_attr.attr,
+	&cntr_val_attr.attr,
+	&seq_12_event_attr.attr,
+	&seq_21_event_attr.attr,
+	&seq_23_event_attr.attr,
+	&seq_31_event_attr.attr,
+	&seq_32_event_attr.attr,
+	&seq_13_event_attr.attr,
+	&seq_curr_state_attr.attr,
+	&ctxid_idx_attr.attr,
+	&ctxid_val_attr.attr,
+	&ctxid_mask_attr.attr,
+	&sync_freq_attr.attr,
+	&timestamp_event_attr.attr,
+	NULL,
+};
+
+static struct attribute_group etm_attr_grp = {
+	.attrs = etm_attrs,
+};
+
+/*
+ * Create the "etm" kobject under the qdss module kobject and populate it
+ * with the "enabled" control file plus the configuration attribute group.
+ * A group-creation failure is logged but deliberately not fatal; only
+ * the kobject or the enabled attribute failing aborts initialization.
+ */
+static int __init etm_sysfs_init(void)
+{
+	int ret;
+
+	etm.kobj = kobject_create_and_add("etm", qdss_get_modulekobj());
+	if (!etm.kobj) {
+		dev_err(etm.dev, "failed to create ETM sysfs kobject\n");
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_file(etm.kobj, &enabled_attr.attr);
+	if (ret) {
+		dev_err(etm.dev, "failed to create ETM sysfs enabled"
+								" attribute\n");
+		kobject_put(etm.kobj);
+		return ret;
+	}
+
+	if (sysfs_create_group(etm.kobj, &etm_attr_grp))
+		dev_err(etm.dev, "failed to create ETM sysfs group\n");
+
+	return 0;
+}
+
+/*
+ * Tear down the ETM sysfs nodes in reverse order of creation: the
+ * attribute group, the standalone enabled attribute, then the kobject
+ * reference itself.
+ */
+static void etm_sysfs_exit(void)
+{
+	sysfs_remove_group(etm.kobj, &etm_attr_grp);
+	sysfs_remove_file(etm.kobj, &enabled_attr.attr);
+	kobject_put(etm.kobj);
+}
+
+/*
+ * Only the PFT v1.1 architecture (as decoded from ETMIDR by
+ * etm_arch_init) is handled by this driver.
+ */
+static bool etm_arch_supported(uint8_t arch)
+{
+	return arch == PFT_ARCH_V1_1;
+}
+
+/*
+ * One-time discovery of the ETM implementation, done on the boot CPU:
+ * unlock the OS lock on all cpus, vote for ETM power/clock, read
+ * ETMIDR/ETMCCR to record the architecture version and resource counts,
+ * apply Krait pass1 restrictions, then drop the power vote and relock.
+ *
+ * Returns 0 on success, -EINVAL if the ETM architecture is unsupported.
+ *
+ * Fix vs. original: the unsupported-arch error path returned without
+ * etm_set_pwrdwn()/ETM_LOCK(), leaving the ETM powered and unlocked.
+ * The cleanup is now shared by both paths.
+ */
+static int __init etm_arch_init(void)
+{
+	int ret = 0;
+	int i;
+	/* use cpu 0 for setup */
+	int cpu = 0;
+	uint32_t etmidr;
+	uint32_t etmccr;
+
+	/* Unlock OS lock first to allow memory mapped reads and writes */
+	etm_os_unlock(NULL);
+	smp_call_function(etm_os_unlock, NULL, 1);
+	ETM_UNLOCK(cpu);
+	/* Vote for ETM power/clock enable */
+	etm_clr_pwrdwn(cpu);
+	/* Set prog bit. It will be set from reset but this is included to
+	 * ensure it is set
+	 */
+	etm_set_prog(cpu);
+
+	/* find all capabilities */
+	etmidr = etm_readl(etm, cpu, ETMIDR);
+	etm.arch = BMVAL(etmidr, 4, 11);
+	if (etm_arch_supported(etm.arch) == false) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	etmccr = etm_readl(etm, cpu, ETMCCR);
+	etm.nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+	etm.nr_cntr = BMVAL(etmccr, 13, 15);
+	etm.nr_ext_inp = BMVAL(etmccr, 17, 19);
+	etm.nr_ext_out = BMVAL(etmccr, 20, 22);
+	etm.nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+	if (cpu_is_krait_v1()) {
+		/* Krait pass1 doesn't support include filtering and non-cycle
+		 * accurate tracing
+		 */
+		etm.mode = (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC);
+		etm.ctrl = 0x1000;
+		etm.enable_ctrl1 = 0x1000000;
+		for (i = 0; i < etm.nr_addr_cmp; i++) {
+			etm.addr_val[i] = 0x0;
+			etm.addr_acctype[i] = 0x0;
+			etm.addr_type[i] = ETM_ADDR_TYPE_NONE;
+		}
+	}
+
+out:
+	/* Drop the power/clock vote and relock on success AND failure so an
+	 * unsupported ETM is not left powered and unlocked.
+	 */
+	etm_set_pwrdwn(cpu);
+	ETM_LOCK(cpu);
+
+	return ret;
+}
+
+static int __devinit etm_probe(struct platform_device *pdev)
{
int ret;
struct resource *res;
@@ -629,80 +1237,82 @@
goto err_res;
}
- ptm.base = ioremap_nocache(res->start, resource_size(res));
- if (!ptm.base) {
+ etm.base = ioremap_nocache(res->start, resource_size(res));
+ if (!etm.base) {
ret = -EINVAL;
goto err_ioremap;
}
- ptm.dev = &pdev->dev;
+ etm.dev = &pdev->dev;
- ret = misc_register(&ptm_misc);
- if (ret)
- goto err_misc;
-
+ mutex_init(&etm.mutex);
+ wake_lock_init(&etm.wake_lock, WAKE_LOCK_SUSPEND, "msm_etm");
+ pm_qos_add_request(&etm.qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
ret = qdss_clk_enable();
if (ret)
goto err_clk;
- ptm_cfg_ro_init();
- ptm_cfg_rw_init();
+ ret = etm_arch_init();
+ if (ret)
+ goto err_arch;
- ptm.trace_enabled = false;
+ ret = etm_sysfs_init();
+ if (ret)
+ goto err_sysfs;
- wake_lock_init(&ptm.wake_lock, WAKE_LOCK_SUSPEND, "msm_ptm");
- pm_qos_add_request(&ptm.qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
- atomic_set(&ptm.in_use, 0);
+ etm.enabled = false;
qdss_clk_disable();
- dev_info(ptm.dev, "PTM intialized.\n");
+ dev_info(etm.dev, "ETM initialized\n");
- if (trace_on_boot) {
- if (!ptm_trace_enable())
- dev_info(ptm.dev, "tracing enabled\n");
- else
- dev_err(ptm.dev, "error enabling trace\n");
- }
+ if (etm_boot_enable)
+ etm_enable();
return 0;
+err_sysfs:
+err_arch:
+ qdss_clk_disable();
err_clk:
- misc_deregister(&ptm_misc);
-err_misc:
- iounmap(ptm.base);
+ pm_qos_remove_request(&etm.qos_req);
+ wake_lock_destroy(&etm.wake_lock);
+ mutex_destroy(&etm.mutex);
+ iounmap(etm.base);
err_ioremap:
err_res:
+ dev_err(etm.dev, "ETM init failed\n");
return ret;
}
-static int ptm_remove(struct platform_device *pdev)
+static int etm_remove(struct platform_device *pdev)
{
- if (ptm.trace_enabled)
- ptm_trace_disable();
- pm_qos_remove_request(&ptm.qos_req);
- wake_lock_destroy(&ptm.wake_lock);
- misc_deregister(&ptm_misc);
- iounmap(ptm.base);
+ if (etm.enabled)
+ etm_disable();
+ etm_sysfs_exit();
+ pm_qos_remove_request(&etm.qos_req);
+ wake_lock_destroy(&etm.wake_lock);
+ mutex_destroy(&etm.mutex);
+ iounmap(etm.base);
return 0;
}
-static struct platform_driver ptm_driver = {
- .probe = ptm_probe,
- .remove = ptm_remove,
+static struct platform_driver etm_driver = {
+ .probe = etm_probe,
+ .remove = etm_remove,
.driver = {
- .name = "msm_ptm",
+ .name = "msm_etm",
},
};
-int __init ptm_init(void)
+int __init etm_init(void)
{
- return platform_driver_register(&ptm_driver);
+ return platform_driver_register(&etm_driver);
}
-void ptm_exit(void)
+void etm_exit(void)
{
- platform_driver_unregister(&ptm_driver);
+ platform_driver_unregister(&etm_driver);
}
diff --git a/arch/arm/mach-msm/qdss-funnel.c b/arch/arm/mach-msm/qdss-funnel.c
index dd61c15..3eec560 100644
--- a/arch/arm/mach-msm/qdss-funnel.c
+++ b/arch/arm/mach-msm/qdss-funnel.c
@@ -26,12 +26,12 @@
#define funnel_readl(funnel, id, off) \
__raw_readl(funnel.base + (SZ_4K * id) + off)
-#define CS_TFUNNEL_FUNCTL (0x000)
-#define CS_TFUNNEL_PRICTL (0x004)
-#define CS_TFUNNEL_ITATBDATA0 (0xEEC)
-#define CS_TFUNNEL_ITATBCTR2 (0xEF0)
-#define CS_TFUNNEL_ITATBCTR1 (0xEF4)
-#define CS_TFUNNEL_ITATBCTR0 (0xEF8)
+#define FUNNEL_FUNCTL (0x000)
+#define FUNNEL_PRICTL (0x004)
+#define FUNNEL_ITATBDATA0 (0xEEC)
+#define FUNNEL_ITATBCTR2 (0xEF0)
+#define FUNNEL_ITATBCTR1 (0xEF4)
+#define FUNNEL_ITATBCTR0 (0xEF8)
#define FUNNEL_LOCK(id) \
@@ -45,18 +45,21 @@
mb(); \
} while (0)
-#define DEFAULT_HOLDTIME_MASK (0xF00)
-#define DEFAULT_HOLDTIME_SHFT (0x8)
-#define DEFAULT_HOLDTIME (0x7 << DEFAULT_HOLDTIME_SHFT)
-#define DEFAULT_PRIORITY (0xFAC680)
+#define FUNNEL_HOLDTIME_MASK (0xF00)
+#define FUNNEL_HOLDTIME_SHFT (0x8)
+#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
struct funnel_ctx {
void __iomem *base;
bool enabled;
struct device *dev;
+ struct kobject *kobj;
+ uint32_t priority;
};
-static struct funnel_ctx funnel;
+static struct funnel_ctx funnel = {
+ .priority = 0xFAC680,
+};
static void __funnel_enable(uint8_t id, uint32_t port_mask)
{
@@ -64,12 +67,12 @@
FUNNEL_UNLOCK(id);
- functl = funnel_readl(funnel, id, CS_TFUNNEL_FUNCTL);
- functl &= ~DEFAULT_HOLDTIME_MASK;
- functl |= DEFAULT_HOLDTIME;
+ functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
+ functl &= ~FUNNEL_HOLDTIME_MASK;
+ functl |= FUNNEL_HOLDTIME;
functl |= port_mask;
- funnel_writel(funnel, id, functl, CS_TFUNNEL_FUNCTL);
- funnel_writel(funnel, id, DEFAULT_PRIORITY, CS_TFUNNEL_PRICTL);
+ funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
+ funnel_writel(funnel, id, funnel.priority, FUNNEL_PRICTL);
FUNNEL_LOCK(id);
}
@@ -78,7 +81,7 @@
{
__funnel_enable(id, port_mask);
funnel.enabled = true;
- dev_info(funnel.dev, "funnel port mask 0x%lx enabled\n",
+ dev_info(funnel.dev, "FUNNEL port mask 0x%lx enabled\n",
(unsigned long) port_mask);
}
@@ -88,9 +91,9 @@
FUNNEL_UNLOCK(id);
- functl = funnel_readl(funnel, id, CS_TFUNNEL_FUNCTL);
+ functl = funnel_readl(funnel, id, FUNNEL_FUNCTL);
functl &= ~port_mask;
- funnel_writel(funnel, id, functl, CS_TFUNNEL_FUNCTL);
+ funnel_writel(funnel, id, functl, FUNNEL_FUNCTL);
FUNNEL_LOCK(id);
}
@@ -99,10 +102,66 @@
{
__funnel_disable(id, port_mask);
funnel.enabled = false;
- dev_info(funnel.dev, "funnel port mask 0x%lx disabled\n",
+ dev_info(funnel.dev, "FUNNEL port mask 0x%lx disabled\n",
(unsigned long) port_mask);
}
+/* Helper declaring a rw (0644) sysfs attribute wired to name_show/store */
+#define FUNNEL_ATTR(__name)						\
+static struct kobj_attribute __name##_attr =				\
+	__ATTR(__name, S_IRUGO | S_IWUSR, __name##_show, __name##_store)
+
+/*
+ * priority: value programmed into FUNNEL_PRICTL by __funnel_enable();
+ * takes effect the next time the funnel is enabled.
+ */
+static ssize_t priority_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long prio;
+
+	if (sscanf(buf, "%lx", &prio) != 1)
+		return -EINVAL;
+
+	funnel.priority = prio;
+	return n;
+}
+static ssize_t priority_show(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n",
+			 (unsigned long) funnel.priority);
+}
+FUNNEL_ATTR(priority);
+
+/*
+ * Create the "funnel" kobject under the qdss module kobject and add the
+ * priority attribute.  Returns 0 on success or a negative errno; the
+ * kobject reference is dropped on any failure after creation.
+ */
+static int __init funnel_sysfs_init(void)
+{
+	int ret;
+
+	funnel.kobj = kobject_create_and_add("funnel", qdss_get_modulekobj());
+	if (!funnel.kobj) {
+		dev_err(funnel.dev, "failed to create FUNNEL sysfs kobject\n");
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_file(funnel.kobj, &priority_attr.attr);
+	if (ret) {
+		dev_err(funnel.dev, "failed to create FUNNEL sysfs priority"
+								" attribute\n");
+		kobject_put(funnel.kobj);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Remove the priority attribute, then drop the funnel kobject reference. */
+static void funnel_sysfs_exit(void)
+{
+	sysfs_remove_file(funnel.kobj, &priority_attr.attr);
+	kobject_put(funnel.kobj);
+}
+
static int __devinit funnel_probe(struct platform_device *pdev)
{
int ret;
@@ -122,10 +181,14 @@
funnel.dev = &pdev->dev;
+ funnel_sysfs_init();
+
+ dev_info(funnel.dev, "FUNNEL initialized\n");
return 0;
err_ioremap:
err_res:
+ dev_err(funnel.dev, "FUNNEL init failed\n");
return ret;
}
@@ -133,6 +196,7 @@
{
if (funnel.enabled)
funnel_disable(0x0, 0xFF);
+ funnel_sysfs_exit();
iounmap(funnel.base);
return 0;
diff --git a/arch/arm/mach-msm/qdss-tpiu.c b/arch/arm/mach-msm/qdss-tpiu.c
index e4a61de..409bf2c 100644
--- a/arch/arm/mach-msm/qdss-tpiu.c
+++ b/arch/arm/mach-msm/qdss-tpiu.c
@@ -23,19 +23,19 @@
#define tpiu_writel(tpiu, val, off) __raw_writel((val), tpiu.base + off)
#define tpiu_readl(tpiu, off) __raw_readl(tpiu.base + off)
-#define TPIU_SUPPORTED_PORT_SIZE (0x000)
-#define TPIU_CURRENT_PORT_SIZE (0x004)
-#define TPIU_SUPPORTED_TRIGGER_MODES (0x100)
-#define TPIU_TRIGGER_COUNTER_VALUE (0x104)
-#define TPIU_TRIGGER_MULTIPLIER (0x108)
-#define TPIU_SUPPORTED_TEST_PATTERNM (0x200)
-#define TPIU_CURRENT_TEST_PATTERNM (0x204)
-#define TPIU_TEST_PATTERN_REPEAT_COUNTER (0x208)
-#define TPIU_FORMATTER_AND_FLUSH_STATUS (0x300)
-#define TPIU_FORMATTER_AND_FLUSH_CONTROL (0x304)
-#define TPIU_FORMATTER_SYNCHRONIZATION_COUNTER (0x308)
-#define TPIU_EXTCTL_IN_PORT (0x400)
-#define TPIU_EXTCTL_OUT_PORT (0x404)
+#define TPIU_SUPP_PORTSZ (0x000)
+#define TPIU_CURR_PORTSZ (0x004)
+#define TPIU_SUPP_TRIGMODES (0x100)
+#define TPIU_TRIG_CNTRVAL (0x104)
+#define TPIU_TRIG_MULT (0x108)
+#define TPIU_SUPP_TESTPATM (0x200)
+#define TPIU_CURR_TESTPATM (0x204)
+#define TPIU_TEST_PATREPCNTR (0x208)
+#define TPIU_FFSR (0x300)
+#define TPIU_FFCR (0x304)
+#define TPIU_FSYNC_CNTR (0x308)
+#define TPIU_EXTCTL_INPORT (0x400)
+#define TPIU_EXTCTL_OUTPORT (0x404)
#define TPIU_ITTRFLINACK (0xEE4)
#define TPIU_ITTRFLIN (0xEE8)
#define TPIU_ITATBDATA0 (0xEEC)
@@ -67,8 +67,8 @@
{
TPIU_UNLOCK();
- tpiu_writel(tpiu, 0x3000, TPIU_FORMATTER_AND_FLUSH_CONTROL);
- tpiu_writel(tpiu, 0x3040, TPIU_FORMATTER_AND_FLUSH_CONTROL);
+ tpiu_writel(tpiu, 0x3000, TPIU_FFCR);
+ tpiu_writel(tpiu, 0x3040, TPIU_FFCR);
TPIU_LOCK();
}
@@ -77,7 +77,7 @@
{
__tpiu_disable();
tpiu.enabled = false;
- dev_info(tpiu.dev, "tpiu disabled\n");
+ dev_info(tpiu.dev, "TPIU disabled\n");
}
static int __devinit tpiu_probe(struct platform_device *pdev)
@@ -99,10 +99,12 @@
tpiu.dev = &pdev->dev;
+ dev_info(tpiu.dev, "TPIU initialized\n");
return 0;
err_ioremap:
err_res:
+ dev_err(tpiu.dev, "TPIU init failed\n");
return ret;
}
diff --git a/arch/arm/mach-msm/qdss.c b/arch/arm/mach-msm/qdss.c
index 55d14cd..ab28c82 100644
--- a/arch/arm/mach-msm/qdss.c
+++ b/arch/arm/mach-msm/qdss.c
@@ -29,6 +29,18 @@
QDSS_CLK_ON_HSDBG,
};
+struct qdss_ctx {
+	struct kobject *modulekobj;	/* this module's sysfs kobject */
+	uint8_t max_clk;		/* nonzero: vote QDSS_CLK_ON_HSDBG */
+};
+
+static struct qdss_ctx qdss;
+
+
+/* Parent kobject accessor used by the ETM and funnel sub-drivers. */
+struct kobject *qdss_get_modulekobj(void)
+{
+	return qdss.modulekobj;
+}
int qdss_clk_enable(void)
{
@@ -36,13 +48,15 @@
struct msm_rpm_iv_pair iv;
iv.id = MSM_RPM_ID_QDSS_CLK;
- iv.value = QDSS_CLK_ON_DBG;
+ if (qdss.max_clk)
+ iv.value = QDSS_CLK_ON_HSDBG;
+ else
+ iv.value = QDSS_CLK_ON_DBG;
ret = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &iv, 1);
if (WARN(ret, "qdss clks not enabled (%d)\n", ret))
goto err_clk;
return 0;
-
err_clk:
return ret;
}
@@ -58,10 +72,65 @@
WARN(ret, "qdss clks not disabled (%d)\n", ret);
}
+/* Helper declaring a rw (0644) sysfs attribute wired to name_show/store */
+#define QDSS_ATTR(name) \
+static struct kobj_attribute name##_attr = \
+__ATTR(name, S_IRUGO | S_IWUSR, name##_show, name##_store)
+
+/*
+ * max_clk: when nonzero, qdss_clk_enable() votes for the high-speed
+ * debug clock (QDSS_CLK_ON_HSDBG) instead of QDSS_CLK_ON_DBG.
+ */
+static ssize_t max_clk_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t n)
+{
+	unsigned long max_clk;
+
+	if (sscanf(buf, "%lx", &max_clk) != 1)
+		return -EINVAL;
+
+	qdss.max_clk = max_clk;
+	return n;
+}
+static ssize_t max_clk_show(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n",
+			 (unsigned long) qdss.max_clk);
+}
+QDSS_ATTR(max_clk);
+
+/*
+ * Look up this module's kobject (created by the module loader under
+ * /sys/module) and attach the max_clk attribute to it.  Returns 0 on
+ * success, -ENOENT if the module kobject is missing, or the
+ * sysfs_create_file() error.
+ */
+static int __init qdss_sysfs_init(void)
+{
+	int ret;
+
+	qdss.modulekobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!qdss.modulekobj) {
+		pr_err("failed to find QDSS sysfs module kobject\n");
+		return -ENOENT;
+	}
+
+	ret = sysfs_create_file(qdss.modulekobj, &max_clk_attr.attr);
+	if (ret) {
+		pr_err("failed to create QDSS sysfs max_clk attribute\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Remove the max_clk attribute added by qdss_sysfs_init(). */
+static void qdss_sysfs_exit(void)
+{
+	sysfs_remove_file(qdss.modulekobj, &max_clk_attr.attr);
+}
+
static int __init qdss_init(void)
{
int ret;
+ ret = qdss_sysfs_init();
+ if (ret)
+ goto err_sysfs;
ret = etb_init();
if (ret)
goto err_etb;
@@ -71,26 +140,30 @@
ret = funnel_init();
if (ret)
goto err_funnel;
- ret = ptm_init();
+ ret = etm_init();
if (ret)
- goto err_ptm;
+ goto err_etm;
+ pr_info("QDSS initialized\n");
return 0;
-
-err_ptm:
+err_etm:
funnel_exit();
err_funnel:
tpiu_exit();
err_tpiu:
etb_exit();
err_etb:
+ qdss_sysfs_exit();
+err_sysfs:
+ pr_err("QDSS init failed\n");
return ret;
}
module_init(qdss_init);
static void __exit qdss_exit(void)
{
- ptm_exit();
+ qdss_sysfs_exit();
+ etm_exit();
funnel_exit();
tpiu_exit();
etb_exit();
diff --git a/arch/arm/mach-msm/qdss.h b/arch/arm/mach-msm/qdss.h
index 199222a..fee0587 100644
--- a/arch/arm/mach-msm/qdss.h
+++ b/arch/arm/mach-msm/qdss.h
@@ -64,8 +64,8 @@
void tpiu_exit(void);
int funnel_init(void);
void funnel_exit(void);
-int ptm_init(void);
-void ptm_exit(void);
+int etm_init(void);
+void etm_exit(void);
void etb_enable(void);
void etb_disable(void);
@@ -73,6 +73,8 @@
void tpiu_disable(void);
void funnel_enable(uint8_t id, uint32_t port_mask);
void funnel_disable(uint8_t id, uint32_t port_mask);
+
+struct kobject *qdss_get_modulekobj(void);
int qdss_clk_enable(void);
void qdss_clk_disable(void);
diff --git a/arch/arm/mach-msm/rpm-regulator-8930.c b/arch/arm/mach-msm/rpm-regulator-8930.c
index 22595ec..f396fed 100644
--- a/arch/arm/mach-msm/rpm-regulator-8930.c
+++ b/arch/arm/mach-msm/rpm-regulator-8930.c
@@ -50,6 +50,11 @@
.hpm = REQUEST_MEMBER(0, 0x00000C00, 10),
};
+static struct rpm_vreg_parts corner_parts = {
+ .request_len = 1,
+ .uV = REQUEST_MEMBER(0, 0x00000003, 0),
+};
+
/* Physically available PMIC regulator voltage setpoint ranges */
static struct vreg_range pldo_ranges[] = {
VOLTAGE_RANGE( 750000, 1487500, 12500),
@@ -78,11 +83,16 @@
VOLTAGE_RANGE(1500000, 3300000, 50000),
};
+static struct vreg_range corner_ranges[] = {
+ VOLTAGE_RANGE(RPM_VREG_CORNER_NONE, RPM_VREG_CORNER_HIGH, 1),
+};
+
static struct vreg_set_points pldo_set_points = SET_POINTS(pldo_ranges);
static struct vreg_set_points nldo_set_points = SET_POINTS(nldo_ranges);
static struct vreg_set_points nldo1200_set_points = SET_POINTS(nldo1200_ranges);
static struct vreg_set_points smps_set_points = SET_POINTS(smps_ranges);
static struct vreg_set_points ftsmps_set_points = SET_POINTS(ftsmps_ranges);
+static struct vreg_set_points corner_set_points = SET_POINTS(corner_ranges);
static struct vreg_set_points *all_set_points[] = {
&pldo_set_points,
@@ -90,6 +100,7 @@
&nldo1200_set_points,
&smps_set_points,
&ftsmps_set_points,
+ &corner_set_points,
};
#define LDO(_id, _name, _name_pc, _ranges, _hpm_min_load) \
@@ -135,6 +146,19 @@
.rdesc_pc.name = _name_pc, \
}
+#define CORNER(_id, _rpm_id, _name, _ranges) \
+ [RPM_VREG_ID_PM8038_##_id] = { \
+ .req = { \
+ [0] = { .id = MSM_RPM_ID_##_rpm_id, }, \
+ [1] = { .id = -1, }, \
+ }, \
+ .type = RPM_REGULATOR_TYPE_CORNER, \
+ .set_points = &_ranges##_set_points, \
+ .part = &corner_parts, \
+ .id = RPM_VREG_ID_PM8038_##_id, \
+ .rdesc.name = _name, \
+ }
+
static struct vreg vregs[] = {
LDO(L1, "8038_l1", NULL, nldo1200, LDO_1200),
LDO(L2, "8038_l2", "8038_l2_pc", nldo, LDO_150),
@@ -171,6 +195,8 @@
LVS(LVS1, "8038_lvs1", "8038_lvs1_pc"),
LVS(LVS2, "8038_lvs2", "8038_lvs2_pc"),
+
+ CORNER(VDD_DIG_CORNER, VOLTAGE_CORNER, "vdd_dig_corner", corner),
};
static const char *pin_func_label[] = {
diff --git a/arch/arm/mach-msm/rpm-regulator-private.h b/arch/arm/mach-msm/rpm-regulator-private.h
index 88b52ea..d4f9a8a 100644
--- a/arch/arm/mach-msm/rpm-regulator-private.h
+++ b/arch/arm/mach-msm/rpm-regulator-private.h
@@ -24,7 +24,8 @@
RPM_REGULATOR_TYPE_SMPS,
RPM_REGULATOR_TYPE_VS,
RPM_REGULATOR_TYPE_NCP,
- RPM_REGULATOR_TYPE_MAX = RPM_REGULATOR_TYPE_NCP,
+ RPM_REGULATOR_TYPE_CORNER,
+ RPM_REGULATOR_TYPE_MAX = RPM_REGULATOR_TYPE_CORNER,
};
struct request_member {
diff --git a/arch/arm/mach-msm/rpm-regulator.c b/arch/arm/mach-msm/rpm-regulator.c
index ed366ae..fa369b3 100644
--- a/arch/arm/mach-msm/rpm-regulator.c
+++ b/arch/arm/mach-msm/rpm-regulator.c
@@ -90,6 +90,14 @@
[RPM_VREG_FREQ_1p28] = "1.28",
[RPM_VREG_FREQ_1p20] = "1.20",
};
+
+static const char *label_corner[] = {
+ [RPM_VREG_CORNER_NONE] = "NONE",
+ [RPM_VREG_CORNER_LOW] = "LOW",
+ [RPM_VREG_CORNER_NOMINAL] = "NOM",
+ [RPM_VREG_CORNER_HIGH] = "HIGH",
+};
+
/*
* This is used when voting for LPM or HPM by subtracting or adding to the
* hpm_min_load of a regulator. It has units of uA.
@@ -117,7 +125,7 @@
int uV, mV, fm, pm, pc, pf, pd, freq, state, i;
const char *pf_label = "", *fm_label = "", *pc_total = "";
const char *pc_en[4] = {"", "", "", ""};
- const char *pm_label = "", *freq_label = "";
+ const char *pm_label = "", *freq_label = "", *corner_label = "";
char buf[DEBUG_PRINT_BUFFER_SIZE];
size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
int pos = 0;
@@ -169,7 +177,7 @@
vreg->rdesc.name,
(set == MSM_RPM_CTX_SET_0 ? 'A' : 'S'));
- if (USES_PART(vreg, uV))
+ if (USES_PART(vreg, uV) && vreg->type != RPM_REGULATOR_TYPE_CORNER)
pos += scnprintf(buf + pos, buflen - pos, ", v=%7d uV", uV);
if (USES_PART(vreg, mV))
pos += scnprintf(buf + pos, buflen - pos, ", v=%4d mV", mV);
@@ -215,6 +223,12 @@
if (USES_PART(vreg, hpm))
pos += scnprintf(buf + pos, buflen - pos,
", hpm=%d", GET_PART(vreg, hpm));
+ if (USES_PART(vreg, uV) && vreg->type == RPM_REGULATOR_TYPE_CORNER) {
+ if (uV >= 0 && uV < (ARRAY_SIZE(label_corner) - 1))
+ corner_label = label_corner[uV+1];
+ pos += scnprintf(buf + pos, buflen - pos, ", corner=%s (%d)",
+ corner_label, uV);
+ }
pos += scnprintf(buf + pos, buflen - pos, "; req[0]={%d, 0x%08X}",
vreg->req[0].id, vreg->req[0].value);
@@ -507,6 +521,16 @@
}
}
+ if (vreg->type == RPM_REGULATOR_TYPE_CORNER) {
+ /*
+ * Translate from enum values which work as inputs in the
+ * rpm_vreg_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ if (uV > 0)
+ uV -= RPM_VREG_CORNER_NONE;
+ }
+
if (vreg->part->uV.mask) {
val[vreg->part->uV.word] = uV << vreg->part->uV.shift;
mask[vreg->part->uV.word] = vreg->part->uV.mask;
@@ -695,6 +719,7 @@
switch (vreg->type) {
case RPM_REGULATOR_TYPE_LDO:
case RPM_REGULATOR_TYPE_SMPS:
+ case RPM_REGULATOR_TYPE_CORNER:
/* Enable by setting a voltage. */
if (vreg->part->uV.mask) {
val[vreg->part->uV.word]
@@ -717,7 +742,7 @@
}
}
-static int vreg_enable(struct regulator_dev *rdev)
+static int rpm_vreg_enable(struct regulator_dev *rdev)
{
struct vreg *vreg = rdev_get_drvdata(rdev);
unsigned int mask[2] = {0}, val[2] = {0};
@@ -746,6 +771,7 @@
switch (vreg->type) {
case RPM_REGULATOR_TYPE_LDO:
case RPM_REGULATOR_TYPE_SMPS:
+ case RPM_REGULATOR_TYPE_CORNER:
/* Disable by setting a voltage of 0 uV. */
if (vreg->part->uV.mask) {
val[vreg->part->uV.word] |= 0 << vreg->part->uV.shift;
@@ -765,7 +791,7 @@
}
}
-static int vreg_disable(struct regulator_dev *rdev)
+static int rpm_vreg_disable(struct regulator_dev *rdev)
{
struct vreg *vreg = rdev_get_drvdata(rdev);
unsigned int mask[2] = {0}, val[2] = {0};
@@ -838,6 +864,15 @@
return -EINVAL;
}
+ if (vreg->type == RPM_REGULATOR_TYPE_CORNER) {
+ /*
+ * Translate from enum values which work as inputs in the
+ * regulator_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ uV -= RPM_VREG_CORNER_NONE;
+ }
+
if (vreg->part->uV.mask) {
val[vreg->part->uV.word] = uV << vreg->part->uV.shift;
mask[vreg->part->uV.word] = vreg->part->uV.mask;
@@ -1111,8 +1146,8 @@
/* Real regulator operations. */
static struct regulator_ops ldo_ops = {
- .enable = vreg_enable,
- .disable = vreg_disable,
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
.is_enabled = vreg_is_enabled,
.set_voltage = vreg_set_voltage,
.get_voltage = vreg_get_voltage,
@@ -1124,8 +1159,8 @@
};
static struct regulator_ops smps_ops = {
- .enable = vreg_enable,
- .disable = vreg_disable,
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
.is_enabled = vreg_is_enabled,
.set_voltage = vreg_set_voltage,
.get_voltage = vreg_get_voltage,
@@ -1137,15 +1172,25 @@
};
static struct regulator_ops switch_ops = {
- .enable = vreg_enable,
- .disable = vreg_disable,
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
.is_enabled = vreg_is_enabled,
.enable_time = vreg_enable_time,
};
static struct regulator_ops ncp_ops = {
- .enable = vreg_enable,
- .disable = vreg_disable,
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = vreg_is_enabled,
+ .set_voltage = vreg_set_voltage,
+ .get_voltage = vreg_get_voltage,
+ .list_voltage = vreg_list_voltage,
+ .enable_time = vreg_enable_time,
+};
+
+static struct regulator_ops corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
.is_enabled = vreg_is_enabled,
.set_voltage = vreg_set_voltage,
.get_voltage = vreg_get_voltage,
@@ -1165,6 +1210,7 @@
[RPM_REGULATOR_TYPE_SMPS] = &smps_ops,
[RPM_REGULATOR_TYPE_VS] = &switch_ops,
[RPM_REGULATOR_TYPE_NCP] = &ncp_ops,
+ [RPM_REGULATOR_TYPE_CORNER] = &corner_ops,
};
static int __devinit
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 228c77fb..cd32152 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -50,7 +50,7 @@
#define EVENT_MASK_SIZE 1000
#define USER_SPACE_DATA 8000
#define PKT_SIZE 4096
-#define MAX_EQUIP_ID 12
+#define MAX_EQUIP_ID 15
#define DIAG_CTRL_MSG_LOG_MASK 9
#define DIAG_CTRL_MSG_EVENT_MASK 10
#define DIAG_CTRL_MSG_F3_MASK 11
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f16aa0c..4cf6d33 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -522,7 +522,7 @@
uint8_t *temp = buf;
int i = 0;
unsigned char *ptr_data;
- int offset = 8*MAX_EQUIP_ID;
+ int offset = (sizeof(struct mask_info))*MAX_EQUIP_ID;
struct mask_info *ptr = (struct mask_info *)driver->log_masks;
mutex_lock(&driver->diagchar_mutex);
@@ -661,9 +661,10 @@
void *buf = driver->buf_log_mask_update;
int header_size = sizeof(struct diag_ctrl_log_mask);
struct mask_info *ptr = (struct mask_info *)driver->log_masks;
- int i, size = (driver->log_mask->num_items+7)/8;
+ int i, size;
for (i = 0; i < MAX_EQUIP_ID; i++) {
+ size = (ptr->num_items+7)/8;
/* reached null entry */
if ((ptr->equip_id == 0) && (ptr->index == 0))
break;
@@ -1608,7 +1609,7 @@
if (driver->log_masks == NULL &&
(driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL)) == NULL)
goto err;
- driver->log_masks_length = 8*MAX_EQUIP_ID;
+ driver->log_masks_length = (sizeof(struct mask_info))*MAX_EQUIP_ID;
if (driver->event_masks == NULL &&
(driver->event_masks = kzalloc(EVENT_MASK_SIZE,
GFP_KERNEL)) == NULL)
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index eb017de..986a160 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -973,7 +973,6 @@
{
struct kgsl_memdesc *result = NULL;
struct kgsl_mem_entry *entry;
- struct kgsl_process_private *priv;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
struct kgsl_context *context;
@@ -988,21 +987,10 @@
if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
return &device->memstore;
- mutex_lock(&kgsl_driver.process_mutex);
- list_for_each_entry(priv, &kgsl_driver.process_list, list) {
- if (!kgsl_mmu_pt_equal(priv->pagetable, pt_base))
- continue;
- spin_lock(&priv->mem_lock);
- entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);
- if (entry) {
- result = &entry->memdesc;
- spin_unlock(&priv->mem_lock);
- mutex_unlock(&kgsl_driver.process_mutex);
- return result;
- }
- spin_unlock(&priv->mem_lock);
- }
- mutex_unlock(&kgsl_driver.process_mutex);
+ entry = kgsl_get_mem_entry(pt_base, gpuaddr, size);
+
+ if (entry)
+ return &entry->memdesc;
while (1) {
struct adreno_context *adreno_context = NULL;
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 24e8efe..aeb48d7 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2583,7 +2583,7 @@
adreno_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);
/* Enable AHB error reporting */
- adreno_regwrite(device, A3XX_RBBM_AHB_CTL1, 0x86FFFFFF);
+ adreno_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
/* Turn on the power counters */
adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00003000);
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
index 75512d0..420a941 100644
--- a/drivers/gpu/msm/adreno_pm4types.h
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -197,6 +197,9 @@
#define cp_nop_packet(cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8))
+#define pkt_is_type3(pkt) ((pkt) & CP_TYPE3_PKT)
+#define cp_type3_opcode(pkt) (((pkt) >> 8) & 0xFF)
+#define type3_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
/* packet headers */
#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18)
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a5c20dc..d6648e2 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -641,7 +641,6 @@
unsigned int val3;
unsigned int copy_rb_contents = 0;
unsigned int cur_context;
- unsigned int j;
GSL_RB_GET_READPTR(rb, &rb->rptr);
@@ -748,8 +747,20 @@
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
- BUG_ON((copy_rb_contents == 0) &&
- (value == cur_context));
+
+ /*
+ * If other context switches were already lost and
+ * and the current context is the one that is hanging,
+ * then we cannot recover. Print an error message
+ * and leave.
+ */
+
+ if ((copy_rb_contents == 0) && (value == cur_context)) {
+ KGSL_DRV_ERR(device, "GPU recovery could not "
+ "find the previous context\n");
+ return -EINVAL;
+ }
+
/*
* If we were copying the commands and got to this point
* then we need to remove the 3 commands that appear
@@ -780,19 +791,6 @@
}
*rb_size = temp_idx;
- KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
- for (temp_idx = 0; temp_idx < *rb_size;) {
- char str[80];
- int idx = 0;
- if ((temp_idx + 8) <= *rb_size)
- j = 8;
- else
- j = *rb_size - temp_idx;
- for (; j != 0; j--)
- idx += scnprintf(str + idx, 80 - idx,
- "%8.8X ", temp_rb_buffer[temp_idx++]);
- printk(KERN_ALERT "%s", str);
- }
return 0;
}
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 9836043..082df4b 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -27,6 +27,9 @@
#define SNAPSHOT_OBJ_TYPE_IB 0
+/* Keep track of how many bytes are frozen after a snapshot and tell the user */
+static int snapshot_frozen_objsize;
+
static struct kgsl_snapshot_obj {
int type;
uint32_t gpuaddr;
@@ -104,6 +107,97 @@
return 0;
}
+static void ib_parse_load_state(struct kgsl_device *device, unsigned int *pkt,
+ unsigned int ptbase)
+{
+ unsigned int block, source, type;
+
+ /*
+ * The object here is to find indirect shaders i.e - shaders loaded from
+ * GPU memory instead of directly in the command. These should be added
+ * to the list of memory objects to dump. So look at the load state
+ * call and see if 1) the shader block is a shader (block = 4, 5 or 6)
+ * 2) that the block is indirect (source = 4). If these all match then
+ * add the memory address to the list. The size of the object will
+ * differ depending on the type. Type 0 (instructions) are 8 dwords per
+ * unit and type 1 (constants) are 2 dwords per unit.
+ */
+
+ if (type3_pkt_size(pkt[0]) < 2)
+ return;
+
+ /*
+ * pkt[1] 18:16 - source
+ * pkt[1] 21:19 - state block
+ * pkt[1] 31:22 - size in units
+ * pkt[2] 0:1 - type
+ * pkt[2] 31:2 - GPU memory address
+ */
+
+ block = (pkt[1] >> 19) & 0x07;
+ source = (pkt[1] >> 16) & 0x07;
+ type = pkt[2] & 0x03;
+
+ if ((block == 4 || block == 5 || block == 6) && source == 4) {
+ int unitsize = (type == 0) ? 8 : 2;
+ int ret;
+
+ /* Freeze the GPU buffer containing the shader */
+
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ pkt[2] & 0xFFFFFFFC,
+ (((pkt[1] >> 22) & 0x03FF) * unitsize) << 2,
+ SNAPSHOT_GPU_OBJECT_SHADER);
+ snapshot_frozen_objsize += ret;
+ }
+}
+
+/*
+ * Parse all the type3 opcode packets that may contain important information,
+ * such as additional GPU buffers to grab
+ */
+
+static void ib_parse_type3(struct kgsl_device *device, unsigned int *ptr,
+ unsigned int ptbase)
+{
+ switch (cp_type3_opcode(*ptr)) {
+ case CP_LOAD_STATE:
+ ib_parse_load_state(device, ptr, ptbase);
+ break;
+ }
+}
+
+/* Add an IB as a GPU object, but first, parse it to find more goodies within */
+
+static void ib_add_gpu_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int dwords)
+{
+ int i, ret;
+ unsigned int *src = (unsigned int *) adreno_convertaddr(device, ptbase,
+ gpuaddr, dwords << 2);
+
+ if (src == NULL)
+ return;
+
+ for (i = 0; i < dwords; i++) {
+ if (pkt_is_type3(src[i])) {
+ if ((dwords - i) < type3_pkt_size(src[i]) + 1)
+ continue;
+
+ if (adreno_cmd_is_ib(src[i]))
+ ib_add_gpu_object(device, ptbase,
+ src[i + 1], src[i + 2]);
+ else
+ ib_parse_type3(device, &src[i], ptbase);
+ }
+ }
+
+ ret = kgsl_snapshot_get_object(device, ptbase, gpuaddr, dwords << 2,
+ SNAPSHOT_GPU_OBJECT_IB);
+
+ snapshot_frozen_objsize += ret;
+}
+
/* Snapshot the istore memory */
static int snapshot_istore(struct kgsl_device *device, void *snapshot,
int remain, void *priv)
@@ -137,7 +231,7 @@
unsigned int *data = snapshot + sizeof(*header);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned int rbbase, ptbase, rptr, *rbptr;
+ unsigned int rbbase, ptbase, rptr, *rbptr, ibbase;
int start, stop, index;
int numitems, size;
int parse_ibs = 0, ib_parse_start;
@@ -151,6 +245,13 @@
/* Get the current read pointers for the RB */
kgsl_regread(device, REG_CP_RB_RPTR, &rptr);
+ /*
+ * Get the address of the last executed IB1 so we can be sure to
+ * snapshot it
+ */
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &ibbase);
+
/* start the dump at the rptr minus some history */
start = (int) rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (start < 0)
@@ -249,9 +350,19 @@
if (index == rptr)
parse_ibs = 0;
- if (parse_ibs && adreno_cmd_is_ib(rbptr[index]))
- push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
- rbptr[index + 1], rbptr[index + 2]);
+ if (parse_ibs && adreno_cmd_is_ib(rbptr[index])) {
+ /*
+ * The IB from CP_IB1_BASE goes into the snapshot, all
+ * others get marked as GPU objects
+ */
+ if (rbptr[index + 1] == ibbase)
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB,
+ ptbase, rbptr[index + 1],
+ rbptr[index + 2]);
+ else
+ ib_add_gpu_object(device, ptbase,
+ rbptr[index + 1], rbptr[index + 2]);
+ }
index = index + 1;
@@ -277,7 +388,6 @@
return size + sizeof(*header);
}
-/* Snapshot the memory for an indirect buffer */
static int snapshot_ib(struct kgsl_device *device, void *snapshot,
int remain, void *priv)
{
@@ -299,16 +409,19 @@
header->size = obj->dwords;
/* Write the contents of the ib */
- for (i = 0; i < obj->dwords; i++) {
+ for (i = 0; i < obj->dwords; i++, src++, dst++) {
*dst = *src;
- /* If another IB is discovered, then push it on the list too */
- if (adreno_cmd_is_ib(*src))
- push_object(device, SNAPSHOT_OBJ_TYPE_IB, obj->ptbase,
- *(src + 1), *(src + 2));
+ if (pkt_is_type3(*src)) {
+ if ((obj->dwords - i) < type3_pkt_size(*src) + 1)
+ continue;
- src++;
- dst++;
+ if (adreno_cmd_is_ib(*src))
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB,
+ obj->ptbase, src[1], src[2]);
+ else
+ ib_parse_type3(device, src, obj->ptbase);
+ }
}
return (obj->dwords << 2) + sizeof(*header);
@@ -354,6 +467,8 @@
/* Reset the list of objects */
objbufptr = 0;
+ snapshot_frozen_objsize = 0;
+
/* Get the physical address of the MMU pagetable */
ptbase = kgsl_mmu_get_current_ptbase(device);
@@ -425,5 +540,9 @@
snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot,
remain, hang);
+ if (snapshot_frozen_objsize)
+ KGSL_DRV_ERR(device, "GPU snapshot froze %dKb of GPU buffers\n",
+ snapshot_frozen_objsize / 1024);
+
return snapshot;
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 50a6fab..5464bbb 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -136,6 +136,39 @@
}
}
+/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
+ * @ptbase - the pagetable base of the object
+ * @gpuaddr - the GPU address of the object
+ * @size - Size of the region to search
+ */
+
+struct kgsl_mem_entry *kgsl_get_mem_entry(unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_process_private *priv;
+ struct kgsl_mem_entry *entry;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ list_for_each_entry(priv, &kgsl_driver.process_list, list) {
+ if (!kgsl_mmu_pt_equal(priv->pagetable, ptbase))
+ continue;
+ spin_lock(&priv->mem_lock);
+ entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);
+
+ if (entry) {
+ spin_unlock(&priv->mem_lock);
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return entry;
+ }
+ spin_unlock(&priv->mem_lock);
+ }
+ mutex_unlock(&kgsl_driver.process_mutex);
+
+ return NULL;
+}
+EXPORT_SYMBOL(kgsl_get_mem_entry);
+
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
@@ -156,8 +189,6 @@
struct kgsl_mem_entry,
refcount);
- entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
-
if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
kgsl_driver.stats.mapped -= entry->memdesc.size;
@@ -200,6 +231,21 @@
entry->priv = process;
}
+/* Detach a memory entry from a process and unmap it from the MMU */
+
+static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
+{
+ if (entry == NULL)
+ return;
+
+ entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
+ entry->priv = NULL;
+
+ kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);
+
+ kgsl_mem_entry_put(entry);
+}
+
/* Allocate a new context id */
static struct kgsl_context *
@@ -593,7 +639,7 @@
list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
list_del(&entry->list);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_detach_process(entry);
}
kgsl_mmu_putpagetable(private->pagetable);
@@ -1016,7 +1062,7 @@
list_del(&entry->list);
spin_unlock(&entry->priv->mem_lock);
trace_kgsl_mem_timestamp_free(entry, timestamp);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_detach_process(entry);
}
static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
@@ -1117,7 +1163,7 @@
if (entry) {
trace_kgsl_mem_free(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_detach_process(entry);
} else {
KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
result = -EINVAL;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index b2fe095..06f78fc 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -126,10 +126,15 @@
#define KGSL_MEM_ENTRY_ION 4
#define KGSL_MEM_ENTRY_MAX 5
+/* List of flags */
+
+#define KGSL_MEM_ENTRY_FROZEN (1 << 0)
+
struct kgsl_mem_entry {
struct kref refcount;
struct kgsl_memdesc memdesc;
int memtype;
+ int flags;
void *priv_data;
struct list_head list;
uint32_t free_timestamp;
@@ -145,6 +150,10 @@
#endif
void kgsl_mem_entry_destroy(struct kref *kref);
+
+struct kgsl_mem_entry *kgsl_get_mem_entry(unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size);
+
struct kgsl_mem_entry *kgsl_sharedmem_find_region(
struct kgsl_process_private *private, unsigned int gpuaddr,
size_t size);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index d7a25a1..2eacf22 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -176,6 +176,12 @@
losing the output on multiple hangs */
struct kobject snapshot_kobj;
+ /*
+ * List of GPU buffers that have been frozen in memory until they can be
+ * dumped
+ */
+ struct list_head snapshot_obj_list;
+
/* Logging levels */
int cmd_log;
int ctxt_log;
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 671479e..8eebb77 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -605,6 +605,7 @@
memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
memdesc->size);
+ memdesc->gpuaddr = 0;
return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 93fdc08..cfcb2ea 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -22,6 +22,17 @@
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"
+/* Placeholder for the list of memory objects frozen after a hang */
+
+struct kgsl_snapshot_object {
+ unsigned int gpuaddr;
+ unsigned int ptbase;
+ unsigned int size;
+ int type;
+ struct kgsl_mem_entry *entry;
+ struct list_head node;
+};
+
/* idr_for_each function to count the number of contexts */
static int snapshot_context_count(int id, void *ptr, void *data)
@@ -164,6 +175,199 @@
return (iregs->count * 4) + sizeof(*header);
}
+#define GPU_OBJ_HEADER_SZ \
+ (sizeof(struct kgsl_snapshot_section_header) + \
+ sizeof(struct kgsl_snapshot_gpu_object))
+
+#define GPU_OBJ_SECTION_SIZE(_o) \
+ (GPU_OBJ_HEADER_SZ + ((_o)->size))
+
+static int kgsl_snapshot_dump_object(struct kgsl_device *device,
+ struct kgsl_snapshot_object *obj, void *buf,
+ unsigned int off, unsigned int count)
+{
+ unsigned char headers[GPU_OBJ_HEADER_SZ];
+ struct kgsl_snapshot_section_header *sect =
+ (struct kgsl_snapshot_section_header *) headers;
+ struct kgsl_snapshot_gpu_object *header =
+ (struct kgsl_snapshot_gpu_object *) (headers + sizeof(*sect));
+ int ret = 0;
+
+ /* Construct a local copy of the headers */
+
+ sect->magic = SNAPSHOT_SECTION_MAGIC;
+ sect->id = KGSL_SNAPSHOT_SECTION_GPU_OBJECT;
+ sect->size = GPU_OBJ_SECTION_SIZE(obj);
+
+ header->type = obj->type;
+
+ /* Header size is in dwords, object size is in bytes */
+ header->size = obj->size >> 2;
+ header->gpuaddr = obj->gpuaddr;
+ header->ptbase = obj->ptbase;
+
+ /* Copy out any part of the header block that is needed */
+
+ if (off < GPU_OBJ_HEADER_SZ) {
+ int size = count < GPU_OBJ_HEADER_SZ - off ?
+ count : GPU_OBJ_HEADER_SZ - off;
+
+ memcpy(buf, headers + off, size);
+
+ count -= size;
+ ret += size;
+ }
+
+ /* Now copy whatever part of the data is needed */
+
+ if (off < (GPU_OBJ_HEADER_SZ + obj->size)) {
+ int offset;
+ int size = count < obj->size ? count : obj->size;
+
+ /*
+ * If the desired gpuaddr isn't at the beginning of the region,
+ * then offset the source pointer
+ */
+
+ offset = obj->gpuaddr - obj->entry->memdesc.gpuaddr;
+
+ /*
+ * Then adjust it to account for the offset for the output
+ * buffer.
+ */
+
+ if (off > GPU_OBJ_HEADER_SZ) {
+ int loff = (off - GPU_OBJ_HEADER_SZ);
+
+ /* Adjust the size so we don't walk off the end */
+
+ if ((loff + size) > obj->size)
+ size = obj->size - loff;
+
+ offset += loff;
+ }
+
+ memcpy(buf + ret, obj->entry->memdesc.hostptr + offset, size);
+ ret += size;
+ }
+
+ return ret;
+}
+
+static void kgsl_snapshot_put_object(struct kgsl_device *device,
+ struct kgsl_snapshot_object *obj)
+{
+ list_del(&obj->node);
+
+ obj->entry->flags &= ~KGSL_MEM_ENTRY_FROZEN;
+ kgsl_mem_entry_put(obj->entry);
+
+ kfree(obj);
+}
+
+/* kgsl_snapshot_get_object - Mark a GPU buffer to be frozen
+ * @device - the device that is being snapshotted
+ * @ptbase - the pagetable base of the object to freeze
+ * @gpuaddr - The gpu address of the object to freeze
+ * @size - the size of the object (may not always be the size of the region)
+ * @type - the type of object being saved (shader, vbo, etc)
+ *
+ * Mark and freeze a GPU buffer object. This will prevent it from being
+ * freed until it can be copied out as part of the snapshot dump. Returns the
+ * size of the object being frozen
+ */
+
+int kgsl_snapshot_get_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size, unsigned int type)
+{
+ struct kgsl_mem_entry *entry;
+ struct kgsl_snapshot_object *obj;
+ int offset;
+
+ entry = kgsl_get_mem_entry(ptbase, gpuaddr, size);
+
+ if (entry == NULL) {
+ KGSL_DRV_ERR(device, "Unable to find GPU buffer %8.8X\n",
+ gpuaddr);
+ return 0;
+ }
+
+ /* We can't freeze external memory, because we don't own it */
+ if (entry->memtype != KGSL_MEM_ENTRY_KERNEL) {
+ KGSL_DRV_ERR(device,
+ "Only internal GPU buffers can be frozen\n");
+ return 0;
+ }
+
+ /*
+ * size indicates the number of bytes in the region to save. This might
+ * not always be the entire size of the region because some buffers are
+ * sub-allocated from a larger region. However, if size 0 was passed
+ * that's a flag that the caller wants to capture the entire buffer
+ */
+
+ if (size == 0) {
+ size = entry->memdesc.size;
+ offset = 0;
+
+ /* Adjust the gpuaddr to the start of the object */
+ gpuaddr = entry->memdesc.gpuaddr;
+ } else {
+ offset = gpuaddr - entry->memdesc.gpuaddr;
+ }
+
+ if (size + offset > entry->memdesc.size) {
+ KGSL_DRV_ERR(device, "Invalid size for GPU buffer %8.8X\n",
+ gpuaddr);
+ return 0;
+ }
+
+ /* If the buffer is already on the list, skip it */
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+ if (obj->gpuaddr == gpuaddr && obj->ptbase == ptbase) {
+ /* If the size is different, use the new size */
+ if (obj->size != size)
+ obj->size = size;
+
+ return 0;
+ }
+ }
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+
+ if (obj == NULL) {
+ KGSL_DRV_ERR(device, "Unable to allocate memory\n");
+ return 0;
+ }
+
+ /* Ref count the mem entry */
+ kgsl_mem_entry_get(entry);
+
+ obj->type = type;
+ obj->entry = entry;
+ obj->gpuaddr = gpuaddr;
+ obj->ptbase = ptbase;
+ obj->size = size;
+
+ list_add(&obj->node, &device->snapshot_obj_list);
+
+ /*
+ * Return the size of the entire mem entry that was frozen - this gets
+ * used for tracking how much memory is frozen for a hang. Also, mark
+ * the memory entry as frozen. If the entry was already marked as
+ * frozen, then another buffer already got to it. In that case, return
+ * 0 so it doesn't get counted twice
+ */
+
+ if (entry->flags & KGSL_MEM_ENTRY_FROZEN)
+ return 0;
+
+ entry->flags |= KGSL_MEM_ENTRY_FROZEN;
+
+ return entry->memdesc.size;
+}
+EXPORT_SYMBOL(kgsl_snapshot_get_object);
+
/*
* kgsl_snapshot_dump_regs - helper function to dump device registers
* @device - the device to dump registers from
@@ -288,10 +492,6 @@
snapshot = device->ftbl->snapshot(device, snapshot, &remain,
hang);
- /* Add the empty end section to let the parser know we are done */
- snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_END,
- snapshot, &remain, NULL, NULL);
-
device->snapshot_timestamp = get_seconds();
device->snapshot_size = (int) (snapshot - device->snapshot);
@@ -326,6 +526,8 @@
size_t count)
{
struct kgsl_device *device = kobj_to_device(kobj);
+ struct kgsl_snapshot_object *obj, *tmp;
+ unsigned int size, src, dst = 0;
if (device == NULL)
return 0;
@@ -337,25 +539,80 @@
/* Get the mutex to keep things from changing while we are dumping */
mutex_lock(&device->mutex);
- /*
- * Release the freeze on the snapshot the first time the buffer is read
- */
+ if (off < device->snapshot_size) {
+ size = count < (device->snapshot_size - off) ?
+ count : device->snapshot_size - off;
+
+ memcpy(buf, device->snapshot + off, size);
+
+ count -= size;
+ dst += size;
+ }
+
+ if (count == 0)
+ goto done;
+
+ src = device->snapshot_size;
+
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+
+ int objsize = GPU_OBJ_SECTION_SIZE(obj);
+ int offset;
+
+ /* If the offset is beyond this object, then move on */
+
+ if (off >= (src + objsize)) {
+ src += objsize;
+ continue;
+ }
+
+ /* Adjust the offset to be relative to the object */
+ offset = (off >= src) ? (off - src) : 0;
+
+ size = kgsl_snapshot_dump_object(device, obj, buf + dst,
+ offset, count);
+
+ count -= size;
+ dst += size;
+
+ if (count == 0)
+ goto done;
+
+ /* Move on to the next object - update src accordingly */
+ src += objsize;
+ }
+
+ /* Add the end section */
+
+ if (off < (src + sizeof(struct kgsl_snapshot_section_header))) {
+ if (count >= sizeof(struct kgsl_snapshot_section_header)) {
+ struct kgsl_snapshot_section_header *head =
+ (void *) (buf + dst);
+
+ head->magic = SNAPSHOT_SECTION_MAGIC;
+ head->id = KGSL_SNAPSHOT_SECTION_END;
+ head->size = sizeof(*head);
+
+ dst += sizeof(*head);
+ } else {
+ goto done;
+ }
+ }
+
+ /* Release the buffers and unfreeze the snapshot */
+
+ list_for_each_entry_safe(obj, tmp, &device->snapshot_obj_list, node)
+ kgsl_snapshot_put_object(device, obj);
+
+ if (device->snapshot_frozen)
+ KGSL_DRV_ERR(device, "Snapshot objects released\n");
device->snapshot_frozen = 0;
- if (off >= device->snapshot_size) {
- count = 0;
- goto exit;
- }
-
- if (off + count > device->snapshot_size)
- count = device->snapshot_size - off;
-
- memcpy(buf, device->snapshot + off, count);
-
-exit:
+done:
mutex_unlock(&device->mutex);
- return count;
+
+ return dst;
}
/* Show the timestamp of the last collected snapshot */
@@ -459,6 +716,8 @@
device->snapshot_maxsize = KGSL_SNAPSHOT_MEMSIZE;
device->snapshot_timestamp = 0;
+ INIT_LIST_HEAD(&device->snapshot_obj_list);
+
ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
&device->dev->kobj, "snapshot");
if (ret)
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index 3b72b0f..bd5be74 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -48,6 +48,8 @@
#define KGSL_SNAPSHOT_SECTION_ISTORE 0x0801
#define KGSL_SNAPSHOT_SECTION_DEBUG 0x0901
#define KGSL_SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define KGSL_SNAPSHOT_SECTION_GPU_OBJECT 0x0B01
+
#define KGSL_SNAPSHOT_SECTION_END 0xFFFF
/* OS sub-section header */
@@ -149,6 +151,16 @@
int count; /* Number of dwords in the dump */
} __packed;
+#define SNAPSHOT_GPU_OBJECT_SHADER 1
+#define SNAPSHOT_GPU_OBJECT_IB 2
+
+struct kgsl_snapshot_gpu_object {
+ int type; /* Type of GPU object */
+ __u32 gpuaddr; /* GPU address of the object */
+ __u32 ptbase; /* Base for the pagetable the GPU address is valid in */
+ int size; /* Size of the object (in dwords) */
+};
+
#ifdef __KERNEL__
/* Allocate 512K for each device snapshot */
@@ -272,6 +284,9 @@
void *snapshot, int *remain, unsigned int index,
unsigned int data, unsigned int start, unsigned int count);
+/* Freeze a GPU buffer so it can be dumped in the snapshot */
+int kgsl_snapshot_get_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size, unsigned int type);
#endif
#endif
diff --git a/drivers/media/video/msm/csi/msm_csid.c b/drivers/media/video/msm/csi/msm_csid.c
index d7abaa9..e531089 100644
--- a/drivers/media/video/msm/csi/msm_csid.c
+++ b/drivers/media/video/msm/csi/msm_csid.c
@@ -51,8 +51,8 @@
#define CSID_TG_DT_n_CFG_1_ADDR 0xAC
#define CSID_TG_DT_n_CFG_2_ADDR 0xB0
#define CSID_TG_DT_n_CFG_3_ADDR 0xD8
-
-#define DBG_CSID 0
+#define CSID_RST_DONE_IRQ_BITSHIFT 11
+#define CSID_RST_STB_ALL 0x7FFF
static int msm_csid_cid_lut(
struct msm_camera_csid_lut_params *csid_lut_params,
@@ -92,6 +92,7 @@
csid_dev = v4l2_get_subdevdata(cfg_params->subdev);
csidbase = csid_dev->base;
csid_params = cfg_params->parms;
+
val = csid_params->lane_cnt - 1;
val |= csid_params->lane_assign << 2;
val |= 0x1 << 10;
@@ -105,8 +106,9 @@
if (rc < 0)
return rc;
- msm_io_w(0x7fF10800, csidbase + CSID_IRQ_MASK_ADDR);
- msm_io_w(0x7fF10800, csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
+ val = ((1 << csid_params->lane_cnt) - 1) << 20;
+ msm_io_w(0x7f010800 | val, csidbase + CSID_IRQ_MASK_ADDR);
+ msm_io_w(0x7f010800 | val, csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
msleep(20);
return rc;
@@ -119,10 +121,19 @@
irq = msm_io_r(csid_dev->base + CSID_IRQ_STATUS_ADDR);
CDBG("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
__func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 << CSID_RST_DONE_IRQ_BITSHIFT))
+ complete(&csid_dev->reset_complete);
msm_io_w(irq, csid_dev->base + CSID_IRQ_CLEAR_CMD_ADDR);
return IRQ_HANDLED;
}
+static void msm_csid_reset(struct csid_device *csid_dev)
+{
+ msm_io_w(CSID_RST_STB_ALL, csid_dev->base + CSID_RST_CMD_ADDR);
+ wait_for_completion_interruptible(&csid_dev->reset_complete);
+ return;
+}
+
static int msm_csid_subdev_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
@@ -185,9 +196,11 @@
msm_io_r(csid_dev->base + CSID_HW_VERSION_ADDR);
*csid_version = csid_dev->hw_version;
-#if DBG_CSID
+ init_completion(&csid_dev->reset_complete);
+
enable_irq(csid_dev->irq->start);
-#endif
+
+ msm_csid_reset(csid_dev);
return 0;
clk_enable_failed:
@@ -206,9 +219,7 @@
struct csid_device *csid_dev;
csid_dev = v4l2_get_subdevdata(sd);
-#if DBG_CSID
disable_irq(csid_dev->irq->start);
-#endif
msm_cam_clk_enable(&csid_dev->pdev->dev, csid_clk_info,
csid_dev->csid_clk, ARRAY_SIZE(csid_clk_info), 0);
diff --git a/drivers/media/video/msm/csi/msm_csid.h b/drivers/media/video/msm/csi/msm_csid.h
index 105cd49..2eae49c 100644
--- a/drivers/media/video/msm/csi/msm_csid.h
+++ b/drivers/media/video/msm/csi/msm_csid.h
@@ -26,6 +26,7 @@
struct regulator *csi_vdd;
void __iomem *base;
struct mutex mutex;
+ struct completion reset_complete;
uint32_t hw_version;
struct clk *csid_clk[5];
diff --git a/drivers/media/video/msm/csi/msm_csiphy.c b/drivers/media/video/msm/csi/msm_csiphy.c
index a30dc29..0c1e0a4 100644
--- a/drivers/media/video/msm/csi/msm_csiphy.c
+++ b/drivers/media/video/msm/csi/msm_csiphy.c
@@ -38,6 +38,8 @@
#define MIPI_CSIPHY_LNCK_MISC1_ADDR 0x128
#define MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR 0x1E0
#define MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR 0x1E8
+#define MIPI_CSIPHY_T_WAKEUP_CFG1_ADDR 0x1EC
+#define MIPI_CSIPHY_GLBL_RESET_ADDR 0x0140
#define MIPI_CSIPHY_GLBL_PWR_CFG_ADDR 0x0144
#define MIPI_CSIPHY_INTERRUPT_STATUS0_ADDR 0x0180
#define MIPI_CSIPHY_INTERRUPT_STATUS1_ADDR 0x0184
@@ -58,14 +60,17 @@
int msm_csiphy_config(struct csiphy_cfg_params *cfg_params)
{
int rc = 0;
- int i = 0;
+ int j = 0;
uint32_t val = 0;
+ uint8_t lane_cnt = 0, lane_mask = 0;
struct csiphy_device *csiphy_dev;
struct msm_camera_csiphy_params *csiphy_params;
void __iomem *csiphybase;
csiphy_dev = v4l2_get_subdevdata(cfg_params->subdev);
csiphybase = csiphy_dev->base;
csiphy_params = cfg_params->parms;
+ lane_mask = csiphy_params->lane_mask;
+ lane_cnt = csiphy_params->lane_cnt;
if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
CDBG("%s: unsupported lane cnt %d\n",
__func__, csiphy_params->lane_cnt);
@@ -73,15 +78,28 @@
}
val = 0x3;
- msm_io_w((((1 << csiphy_params->lane_cnt) - 1) << 2) | val,
- csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
+ msm_io_w((csiphy_params->lane_mask << 2) | val,
+ csiphybase + MIPI_CSIPHY_GLBL_PWR_CFG_ADDR);
msm_io_w(0x1, csiphybase + MIPI_CSIPHY_GLBL_T_INIT_CFG0_ADDR);
msm_io_w(0x1, csiphybase + MIPI_CSIPHY_T_WAKEUP_CFG0_ADDR);
- for (i = 0; i < csiphy_params->lane_cnt; i++) {
- msm_io_w(0x10, csiphybase + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*i);
+ while (lane_mask & 0xf) {
+ if (!(lane_mask & 0x1)) {
+ j++;
+ lane_mask >>= 1;
+ continue;
+ }
+ msm_io_w(0x10, csiphybase + MIPI_CSIPHY_LNn_CFG2_ADDR + 0x40*j);
msm_io_w(csiphy_params->settle_cnt,
- csiphybase + MIPI_CSIPHY_LNn_CFG3_ADDR + 0x40*i);
+ csiphybase + MIPI_CSIPHY_LNn_CFG3_ADDR + 0x40*j);
+ msm_io_w(0x6F,
+ csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR +
+ 0x4*(j+1));
+ msm_io_w(0x6F,
+ csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR +
+ 0x4*(j+1));
+ j++;
+ lane_mask >>= 1;
}
msm_io_w(0x10, csiphybase + MIPI_CSIPHY_LNCK_CFG2_ADDR);
@@ -92,13 +110,6 @@
csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR);
msm_io_w(0x24,
csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR);
-
- for (i = 1; i <= csiphy_params->lane_cnt; i++) {
- msm_io_w(0x6F,
- csiphybase + MIPI_CSIPHY_INTERRUPT_MASK0_ADDR + 0x4*i);
- msm_io_w(0x6F,
- csiphybase + MIPI_CSIPHY_INTERRUPT_CLEAR0_ADDR + 0x4*i);
- }
return rc;
}
@@ -131,6 +142,13 @@
return IRQ_HANDLED;
}
+static void msm_csiphy_reset(struct csiphy_device *csiphy_dev)
+{
+ msm_io_w(0x1, csiphy_dev->base + MIPI_CSIPHY_GLBL_RESET_ADDR);
+ usleep_range(5000, 8000);
+ msm_io_w(0x0, csiphy_dev->base + MIPI_CSIPHY_GLBL_RESET_ADDR);
+}
+
static int msm_csiphy_subdev_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
@@ -173,6 +191,7 @@
#if DBG_CSIPHY
enable_irq(csiphy_dev->irq->start);
#endif
+ msm_csiphy_reset(csiphy_dev);
return 0;
}
diff --git a/drivers/media/video/msm/sensors/imx074_v4l2.c b/drivers/media/video/msm/sensors/imx074_v4l2.c
index 92700f6..af9d1f4 100644
--- a/drivers/media/video/msm/sensors/imx074_v4l2.c
+++ b/drivers/media/video/msm/sensors/imx074_v4l2.c
@@ -175,7 +175,6 @@
static struct msm_camera_csi2_params imx074_csi_params = {
.csid_params = {
- .lane_assign = 0xe4,
.lane_cnt = 4,
.lut_params = {
.num_cid = 2,
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index e40a528..1112be7 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -321,6 +321,12 @@
msm_sensor_write_res_settings(s_ctrl, res);
if (s_ctrl->curr_csi_params != s_ctrl->csi_params[res]) {
s_ctrl->curr_csi_params = s_ctrl->csi_params[res];
+ s_ctrl->curr_csi_params->csid_params.lane_assign =
+ s_ctrl->sensordata->sensor_platform_info->
+ csi_lane_params->csi_lane_assign;
+ s_ctrl->curr_csi_params->csiphy_params.lane_mask =
+ s_ctrl->sensordata->sensor_platform_info->
+ csi_lane_params->csi_lane_mask;
v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
NOTIFY_CSID_CFG,
&s_ctrl->curr_csi_params->csid_params);
diff --git a/drivers/media/video/msm/sensors/mt9m114_v4l2.c b/drivers/media/video/msm/sensors/mt9m114_v4l2.c
index a1e56f7..2184806 100644
--- a/drivers/media/video/msm/sensors/mt9m114_v4l2.c
+++ b/drivers/media/video/msm/sensors/mt9m114_v4l2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1191,7 +1191,6 @@
static struct msm_camera_csi2_params mt9m114_csi_params = {
.csid_params = {
- .lane_assign = 0xe4,
.lane_cnt = 1,
.lut_params = {
.num_cid = 2,
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index 246900e..05556eb 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -341,7 +341,6 @@
static struct msm_camera_csi2_params ov2720_csi_params = {
.csid_params = {
- .lane_assign = 0xe4,
.lane_cnt = 2,
.lut_params = {
.num_cid = 2,
diff --git a/drivers/media/video/msm/sensors/s5k3l1yx.c b/drivers/media/video/msm/sensors/s5k3l1yx.c
index af68601..4a790f8 100644
--- a/drivers/media/video/msm/sensors/s5k3l1yx.c
+++ b/drivers/media/video/msm/sensors/s5k3l1yx.c
@@ -542,7 +542,6 @@
static struct msm_camera_csi2_params s5k3l1yx_csi_params = {
.csid_params = {
- .lane_assign = 0xe4,
.lane_cnt = 4,
.lut_params = {
.num_cid = ARRAY_SIZE(s5k3l1yx_cid_cfg),
diff --git a/drivers/misc/tzcom.c b/drivers/misc/tzcom.c
index 2b1484c..3b943c8 100644
--- a/drivers/misc/tzcom.c
+++ b/drivers/misc/tzcom.c
@@ -29,9 +29,13 @@
#include <linux/android_pmem.h>
#include <linux/io.h>
#include <linux/ion.h>
+#include <linux/tzcom.h>
+#include <linux/clk.h>
#include <mach/scm.h>
#include <mach/peripheral-loader.h>
-#include <linux/tzcom.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/socinfo.h>
#include "tzcomi.h"
#define TZCOM_DEV "tzcom"
@@ -51,6 +55,7 @@
__func__, current->pid, current->comm, ## args)
+static uint32_t tzcom_perf_client;
static struct class *driver_class;
static dev_t tzcom_device_no;
static struct cdev tzcom_cdev;
@@ -68,7 +73,9 @@
static DEFINE_MUTEX(sb_in_lock);
static DEFINE_MUTEX(sb_out_lock);
static DEFINE_MUTEX(send_cmd_lock);
-
+static DEFINE_MUTEX(tzcom_bw_mutex);
+static int tzcom_bw_count;
+static struct clk *tzcom_bus_clk;
struct tzcom_callback_list {
struct list_head list;
struct tzcom_callback callback;
@@ -94,6 +101,53 @@
atomic_t ioctl_count;
};
+static int tzcom_enable_bus_scaling(void)
+{
+ int ret = 0;
+ if (!tzcom_perf_client)
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(tzcom_bus_clk))
+ return -EINVAL;
+
+ mutex_lock(&tzcom_bw_mutex);
+ if (!tzcom_bw_count) {
+ ret = msm_bus_scale_client_update_request(
+ tzcom_perf_client, 1);
+ if (ret) {
+ pr_err("Bandwidth request failed (%d)\n", ret);
+ } else {
+ ret = clk_enable(tzcom_bus_clk);
+ if (ret)
+ pr_err("Clock enable failed\n");
+ }
+ }
+ if (ret)
+ msm_bus_scale_client_update_request(tzcom_perf_client, 0);
+ else
+ tzcom_bw_count++;
+ mutex_unlock(&tzcom_bw_mutex);
+ return ret;
+}
+
+static void tzcom_disable_bus_scaling(void)
+{
+ if (!tzcom_perf_client)
+ return ;
+
+ if (IS_ERR_OR_NULL(tzcom_bus_clk))
+ return ;
+
+ mutex_lock(&tzcom_bw_mutex);
+ if (tzcom_bw_count > 0)
+ if (tzcom_bw_count-- == 1) {
+ msm_bus_scale_client_update_request(tzcom_perf_client,
+ 0);
+ clk_disable(tzcom_bus_clk);
+ }
+ mutex_unlock(&tzcom_bw_mutex);
+}
+
static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len)
{
@@ -878,6 +932,9 @@
struct tzcom_data_t *tzcom_data;
PDEBUG("In here");
+
+ ret = tzcom_enable_bus_scaling();
+
if (pil == NULL) {
pil = pil_get("tzapps");
if (IS_ERR(pil)) {
@@ -1008,9 +1065,39 @@
}
PDEBUG("Freeing tzcom data");
kfree(tzcom_data);
+ tzcom_disable_bus_scaling();
return 0;
}
+static struct msm_bus_paths tzcom_bw_table[] = {
+ {
+ .vectors = (struct msm_bus_vectors[]){
+ {
+ .src = MSM_BUS_MASTER_SPS,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ },
+ },
+ .num_paths = 1,
+ },
+ {
+ .vectors = (struct msm_bus_vectors[]){
+ {
+ .src = MSM_BUS_MASTER_SPS,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ib = (492 * 8) * 1000000UL,
+ .ab = (492 * 8) * 100000UL,
+ },
+ },
+ .num_paths = 1,
+ },
+
+};
+
+static struct msm_bus_scale_pdata tzcom_bus_pdata = {
+ .usecase = tzcom_bw_table,
+ .num_usecases = ARRAY_SIZE(tzcom_bw_table),
+ .name = "tzcom",
+};
static const struct file_operations tzcom_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tzcom_ioctl,
@@ -1098,6 +1185,18 @@
/* Initialized in tzcom_open */
pil = NULL;
+ tzcom_perf_client = msm_bus_scale_register_client(
+ &tzcom_bus_pdata);
+ if (!tzcom_perf_client)
+ pr_err("Unable to register bus client");
+
+ tzcom_bus_clk = clk_get(class_dev, "bus_clk");
+ if (IS_ERR(tzcom_bus_clk)) {
+ tzcom_bus_clk = NULL;
+ } else if (tzcom_bus_clk != NULL) {
+ pr_debug("Enabled DFAB clock\n");
+ clk_set_rate(tzcom_bus_clk, 64000000);
+ }
return 0;
class_device_destroy:
@@ -1132,6 +1231,7 @@
pil_put(pil);
pil = NULL;
}
+ clk_put(tzcom_bus_clk);
device_destroy(driver_class, tzcom_device_no);
class_destroy(driver_class);
unregister_chrdev_region(tzcom_device_no, 1);
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index ff12eb1..c5f4498 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -4732,6 +4732,14 @@
return 0;
}
+/*
+ * msmsdcc_shutdown() - platform .shutdown hook.
+ *
+ * Removes and frees the MMC host so the card is quiesced before
+ * reboot/poweroff.
+ * NOTE(review): pdev is a platform_device; assumes mmc_get_drvdata()
+ * resolves the mmc_host stored as driver data by probe -- confirm
+ * against msmsdcc_probe's drvdata handling.
+ */
+static void msmsdcc_shutdown(struct platform_device *pdev)
+{
+	struct mmc_host *mmc = mmc_get_drvdata(pdev);
+
+	mmc_remove_host(mmc);
+	mmc_free_host(mmc);
+}
+
#ifdef CONFIG_MSM_SDIO_AL
int msmsdcc_sdio_al_lpm(struct mmc_host *mmc, bool enable)
{
@@ -5030,6 +5038,7 @@
static struct platform_driver msmsdcc_driver = {
.probe = msmsdcc_probe,
.remove = msmsdcc_remove,
+ .shutdown = msmsdcc_shutdown,
.driver = {
.name = "msm_sdcc",
.pm = &msmsdcc_dev_pm_ops,
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index 78757f9..9b8ea48 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -258,10 +258,10 @@
struct delayed_work eoc_work;
struct work_struct unplug_ovp_fet_open_work;
struct delayed_work unplug_check_work;
- struct wake_lock unplug_ovp_fet_open_wake_lock;
struct wake_lock eoc_wake_lock;
enum pm8921_chg_cold_thr cold_thr;
enum pm8921_chg_hot_thr hot_thr;
+ int rconn_mohm;
};
static int usb_max_current;
@@ -1728,6 +1728,7 @@
chip->usb_present = usb_present;
power_supply_changed(&chip->usb_psy);
power_supply_changed(&chip->batt_psy);
+ pm8921_bms_calibrate_hkadc();
}
if (usb_present) {
schedule_delayed_work(&chip->unplug_check_work,
@@ -2389,6 +2390,80 @@
round_jiffies_relative(msecs_to_jiffies
(chip->update_time)));
}
+/* Regulation-loop status bit 3: the Vdd loop is currently active. */
+#define VDD_LOOP_ACTIVE_BIT BIT(3)
+/* Maximum amount (mV) vdd_max may be raised above chip->max_voltage_mv. */
+#define VDD_MAX_INCREASE_MV 400
+static int vdd_max_increase_mv = VDD_MAX_INCREASE_MV;
+module_param(vdd_max_increase_mv, int, 0644);
+
+/* Adjust vdd_max only while charge current is more negative than this (uA). */
+static int ichg_threshold_ua = -400000;
+module_param(ichg_threshold_ua, int, 0644);
+/*
+ * adjust_vdd_max_for_fastchg() - compensate vdd_max for the board/connector
+ * resistance (rconn_mohm) while the Vdd regulation loop is active.
+ *
+ * The voltage at the battery terminal differs from the programmed vdd_max
+ * by ichg * rconn, so fast charge would terminate early.  Raise the
+ * programmed vdd_max by that delta, clamped to
+ * max_voltage_mv + vdd_max_increase_mv.  Only runs in the normal
+ * temperature zone, with the Vdd loop active, and with a charge current
+ * in the (ichg_threshold_ua, 0) window.
+ */
+static void adjust_vdd_max_for_fastchg(struct pm8921_chg_chip *chip)
+{
+	int ichg_meas_ua, vbat_uv;
+	int ichg_meas_ma;
+	int adj_vdd_max_mv, programmed_vdd_max;
+	int vbat_batt_terminal_uv;
+	int vbat_batt_terminal_mv;
+	int reg_loop;
+	int delta_mv = 0;
+
+	if (chip->rconn_mohm == 0) {
+		pr_debug("Exiting as rconn_mohm is 0\n");
+		return;
+	}
+	/* adjust vdd_max only in normal temperature zone */
+	if (chip->is_bat_cool || chip->is_bat_warm) {
+		pr_debug("Exiting is_bat_cool = %d is_batt_warm = %d\n",
+			chip->is_bat_cool, chip->is_bat_warm);
+		return;
+	}
+
+	reg_loop = pm_chg_get_regulation_loop(chip);
+	if (!(reg_loop & VDD_LOOP_ACTIVE_BIT)) {
+		pr_debug("Exiting Vdd loop is not active reg loop=0x%x\n",
+			reg_loop);
+		return;
+	}
+
+	pm8921_bms_get_simultaneous_battery_voltage_and_current(&ichg_meas_ua,
+								&vbat_uv);
+	/* positive current means the battery is not charging */
+	if (ichg_meas_ua >= 0) {
+		pr_debug("Exiting ichg_meas_ua = %d > 0\n", ichg_meas_ua);
+		return;
+	}
+	if (ichg_meas_ua <= ichg_threshold_ua) {
+		pr_debug("Exiting ichg_meas_ua = %d < ichg_threshold_ua = %d\n",
+					ichg_meas_ua, ichg_threshold_ua);
+		return;
+	}
+	ichg_meas_ma = ichg_meas_ua / 1000;
+
+	/*
+	 * rconn_mohm is in milliOhms; use the chip passed in rather than
+	 * the global the_chip so the function has a single data source.
+	 */
+	vbat_batt_terminal_uv = vbat_uv + ichg_meas_ma * chip->rconn_mohm;
+	vbat_batt_terminal_mv = vbat_batt_terminal_uv/1000;
+	pm_chg_vddmax_get(chip, &programmed_vdd_max);
+
+	delta_mv =  chip->max_voltage_mv - vbat_batt_terminal_mv;
+
+	adj_vdd_max_mv = programmed_vdd_max + delta_mv;
+	pr_debug("vdd_max needs to be changed by %d mv from %d to %d\n",
+			delta_mv,
+			programmed_vdd_max,
+			adj_vdd_max_mv);
+
+	if (adj_vdd_max_mv < chip->max_voltage_mv) {
+		pr_debug("adj vdd_max lower than default max voltage\n");
+		return;
+	}
+
+	/* never exceed the allowed headroom above the nominal max voltage */
+	if (adj_vdd_max_mv > (chip->max_voltage_mv + vdd_max_increase_mv))
+		adj_vdd_max_mv = chip->max_voltage_mv + vdd_max_increase_mv;
+
+	pr_debug("adjusting vdd_max_mv to %d to make "
+		"vbat_batt_terminal_uv = %d to %d\n",
+		adj_vdd_max_mv, vbat_batt_terminal_uv, chip->max_voltage_mv);
+	pm_chg_vddmax_set(chip, adj_vdd_max_mv);
+}
enum {
CHG_IN_PROGRESS,
@@ -2442,8 +2517,6 @@
}
pr_debug("vddmax = %d vbat_meas_mv=%d\n",
vbat_programmed, vbat_meas_mv);
- if (vbat_meas_mv < vbat_programmed - VBAT_TOLERANCE_MV)
- return CHG_IN_PROGRESS;
if (last_vbat_programmed == -EINVAL)
last_vbat_programmed = vbat_programmed;
@@ -2455,10 +2528,6 @@
return CHG_IN_PROGRESS;
}
- /*
- * TODO if charging from an external charger
- * check SOC instead of regulation loop
- */
regulation_loop = pm_chg_get_regulation_loop(chip);
if (regulation_loop < 0) {
pr_err("couldnt read the regulation loop err=%d\n",
@@ -2518,10 +2587,9 @@
end = is_charging_finished(chip);
if (end == CHG_NOT_IN_PROGRESS) {
- /* enable fastchg irq */
- count = 0;
- wake_unlock(&chip->eoc_wake_lock);
- return;
+ count = 0;
+ wake_unlock(&chip->eoc_wake_lock);
+ return;
}
if (end == CHG_FINISHED) {
@@ -2547,6 +2615,7 @@
chgdone_irq_handler(chip->pmic_chg_irq[CHGDONE_IRQ], chip);
wake_unlock(&chip->eoc_wake_lock);
} else {
+ adjust_vdd_max_for_fastchg(chip);
pr_debug("EOC count = %d\n", count);
schedule_delayed_work(&chip->eoc_work,
round_jiffies_relative(msecs_to_jiffies
@@ -2687,6 +2756,23 @@
module_param_call(disabled, set_disable_status_param, param_get_uint,
&charging_disabled, 0644);
+/* Board/connector resistance in mOhm, runtime-settable via module param. */
+static int rconn_mohm;
+/*
+ * set_rconn_mohm() - module_param setter for rconn_mohm.
+ *
+ * Stores the value and, if the charger has already probed, mirrors it
+ * into the live chip structure so adjust_vdd_max_for_fastchg() picks it
+ * up immediately.
+ * NOTE(review): the value is set with param_set_int but read back below
+ * with param_get_uint -- signedness mismatch, confirm intended.
+ */
+static int set_rconn_mohm(const char *val, struct kernel_param *kp)
+{
+	int ret;
+	struct pm8921_chg_chip *chip = the_chip;
+
+	ret = param_set_int(val, kp);
+	if (ret) {
+		pr_err("error setting value %d\n", ret);
+		return ret;
+	}
+	if (chip)
+		chip->rconn_mohm = rconn_mohm;
+	return 0;
+}
+module_param_call(rconn_mohm, set_rconn_mohm, param_get_uint,
+					&rconn_mohm, 0644);
/**
* set_thermal_mitigation_level -
*
@@ -3004,6 +3090,7 @@
static int __devinit pm8921_chg_hw_init(struct pm8921_chg_chip *chip)
{
int rc;
+ int vdd_safe;
rc = pm_chg_masked_write(chip, SYS_CONFIG_2,
BOOT_DONE_BIT, BOOT_DONE_BIT);
@@ -3012,7 +3099,13 @@
return rc;
}
- rc = pm_chg_vddsafe_set(chip, chip->max_voltage_mv);
+ vdd_safe = chip->max_voltage_mv + VDD_MAX_INCREASE_MV;
+
+ if (vdd_safe > PM8921_CHG_VDDSAFE_MAX)
+ vdd_safe = PM8921_CHG_VDDSAFE_MAX;
+
+ rc = pm_chg_vddsafe_set(chip, vdd_safe);
+
if (rc) {
pr_err("Failed to set safe voltage to %d rc=%d\n",
chip->max_voltage_mv, rc);
@@ -3505,6 +3598,7 @@
chip->cold_thr = pdata->cold_thr;
chip->hot_thr = pdata->hot_thr;
+ chip->rconn_mohm = pdata->rconn_mohm;
rc = pm8921_chg_hw_init(chip);
if (rc) {
@@ -3556,8 +3650,6 @@
the_chip = chip;
wake_lock_init(&chip->eoc_wake_lock, WAKE_LOCK_SUSPEND, "pm8921_eoc");
- wake_lock_init(&chip->unplug_ovp_fet_open_wake_lock,
- WAKE_LOCK_SUSPEND, "pm8921_unplug_wrkarnd");
INIT_DELAYED_WORK(&chip->eoc_work, eoc_worker);
INIT_WORK(&chip->unplug_ovp_fet_open_work,
unplug_ovp_fet_open_worker);
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 7a9d57d..8a97a6e 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -15,169 +15,43 @@
#ifndef __GADGET_CHIPS_H
#define __GADGET_CHIPS_H
-#ifdef CONFIG_USB_GADGET_NET2280
-#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
-#else
-#define gadget_is_net2280(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_AMD5536UDC
-#define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name)
-#else
-#define gadget_is_amd5536udc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_DUMMY_HCD
-#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
-#else
-#define gadget_is_dummy(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_PXA25X
-#define gadget_is_pxa(g) !strcmp("pxa25x_udc", (g)->name)
-#else
-#define gadget_is_pxa(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_GOKU
-#define gadget_is_goku(g) !strcmp("goku_udc", (g)->name)
-#else
-#define gadget_is_goku(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_OMAP
-#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
-#else
-#define gadget_is_omap(g) 0
-#endif
-
-/* various unstable versions available */
-#ifdef CONFIG_USB_GADGET_PXA27X
-#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
-#else
-#define gadget_is_pxa27x(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_ATMEL_USBA
-#define gadget_is_atmel_usba(g) !strcmp("atmel_usba_udc", (g)->name)
-#else
-#define gadget_is_atmel_usba(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_S3C2410
-#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
-#else
-#define gadget_is_s3c2410(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_AT91
-#define gadget_is_at91(g) !strcmp("at91_udc", (g)->name)
-#else
-#define gadget_is_at91(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_IMX
-#define gadget_is_imx(g) !strcmp("imx_udc", (g)->name)
-#else
-#define gadget_is_imx(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_FSL_USB2
-#define gadget_is_fsl_usb2(g) !strcmp("fsl-usb2-udc", (g)->name)
-#else
-#define gadget_is_fsl_usb2(g) 0
-#endif
-
-/* Mentor high speed "dual role" controller, in peripheral role */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-#define gadget_is_musbhdrc(g) !strcmp("musb-hdrc", (g)->name)
-#else
-#define gadget_is_musbhdrc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_LANGWELL
-#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
-#else
-#define gadget_is_langwell(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_M66592
-#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
-#else
-#define gadget_is_m66592(g) 0
-#endif
-
-/* Freescale CPM/QE UDC SUPPORT */
-#ifdef CONFIG_USB_GADGET_FSL_QE
-#define gadget_is_fsl_qe(g) !strcmp("fsl_qe_udc", (g)->name)
-#else
-#define gadget_is_fsl_qe(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_PCI
-#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
-#else
-#define gadget_is_ci13xxx_pci(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_DWC3
-#define gadget_is_dwc3(g) (!strcmp("dwc3-gadget", (g)->name))
-#else
-#define gadget_is_dwc3(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_MSM_72K
-#define gadget_is_msm72k(g) !strcmp("msm72k_udc", (g)->name)
-#else
-#define gadget_is_msm72k(g) 0
-#endif
-
-// CONFIG_USB_GADGET_SX2
-// CONFIG_USB_GADGET_AU1X00
-// ...
-
-#ifdef CONFIG_USB_GADGET_R8A66597
-#define gadget_is_r8a66597(g) !strcmp("r8a66597_udc", (g)->name)
-#else
-#define gadget_is_r8a66597(g) 0
-#endif
-
-#ifdef CONFIG_USB_S3C_HSOTG
-#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name))
-#else
-#define gadget_is_s3c_hsotg(g) 0
-#endif
-
-#ifdef CONFIG_USB_S3C_HSUDC
-#define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
-#else
-#define gadget_is_s3c_hsudc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_EG20T
-#define gadget_is_pch(g) (!strcmp("pch_udc", (g)->name))
-#else
-#define gadget_is_pch(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_MSM
+/*
+ * NOTICE: the entries below are alphabetical and should be kept
+ * that way.
+ *
+ * Always be sure to add new entries to the correct position or
+ * accept the bashing later.
+ *
+ * If you have forgotten the alphabetical order let VIM/EMACS
+ * do that for you.
+ */
+/*
+ * Each macro compares (g)->name against the controller driver's UDC
+ * name.  Controllers not present in the build simply never match, so
+ * the former per-macro #ifdef CONFIG_* guards are unnecessary.
+ */
+#define gadget_is_amd5536udc(g)		(!strcmp("amd5536udc", (g)->name))
+#define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
+#define gadget_is_atmel_usba(g)		(!strcmp("atmel_usba_udc", (g)->name))
 #define gadget_is_ci13xxx_msm(g)	(!strcmp("ci13xxx_msm", (g)->name))
+#define gadget_is_ci13xxx_msm_hsic(g)	(!strcmp("ci13xxx_msm_hsic", (g)->name))
+#define gadget_is_ci13xxx_pci(g)	(!strcmp("ci13xxx_pci", (g)->name))
+#define gadget_is_dummy(g)		(!strcmp("dummy_udc", (g)->name))
+#define gadget_is_dwc3(g)		(!strcmp("dwc3-gadget", (g)->name))
+#define gadget_is_fsl_qe(g)		(!strcmp("fsl_qe_udc", (g)->name))
+#define gadget_is_fsl_usb2(g)		(!strcmp("fsl-usb2-udc", (g)->name))
+#define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
+#define gadget_is_imx(g)		(!strcmp("imx_udc", (g)->name))
+#define gadget_is_langwell(g)		(!strcmp("langwell_udc", (g)->name))
+#define gadget_is_m66592(g)		(!strcmp("m66592_udc", (g)->name))
+#define gadget_is_msm72k(g)		(!strcmp("msm72k_udc", (g)->name))
+#define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
+#define gadget_is_net2272(g)		(!strcmp("net2272", (g)->name))
+#define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
+#define gadget_is_omap(g)		(!strcmp("omap_udc", (g)->name))
+#define gadget_is_pch(g)		(!strcmp("pch_udc", (g)->name))
+#define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
+#define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
+#define gadget_is_r8a66597(g)		(!strcmp("r8a66597_udc", (g)->name))
+#define gadget_is_renesas_usbhs(g)	(!strcmp("renesas_usbhs_udc", (g)->name))
+#define gadget_is_s3c2410(g)		(!strcmp("s3c2410_udc", (g)->name))
+#define gadget_is_s3c_hsotg(g)		(!strcmp("s3c-hsotg", (g)->name))
+#define gadget_is_s3c_hsudc(g)		(!strcmp("s3c-hsudc", (g)->name))
/**
* usb_gadget_controller_number - support bcdDevice id convention
diff --git a/drivers/usb/gadget/u_data_hsic.c b/drivers/usb/gadget/u_data_hsic.c
index abf147a..534aa7b 100644
--- a/drivers/usb/gadget/u_data_hsic.c
+++ b/drivers/usb/gadget/u_data_hsic.c
@@ -666,6 +666,7 @@
if (!port->wq) {
pr_err("%s: Unable to create workqueue:%s\n",
__func__, data_bridge_names[port_num]);
+ kfree(port);
return -ENOMEM;
}
port->port_num = port_num;
diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c
index 14dc73a..8c4b4c7 100644
--- a/drivers/usb/gadget/u_sdio.c
+++ b/drivers/usb/gadget/u_sdio.c
@@ -231,7 +231,7 @@
int gsdio_write(struct gsdio_port *port, struct usb_request *req)
{
unsigned avail;
- char *packet = req->buf;
+ char *packet;
unsigned size = req->actual;
unsigned n;
int ret = 0;
diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c
index 0e9ad48..e74b2e1 100644
--- a/drivers/usb/gadget/u_smd.c
+++ b/drivers/usb/gadget/u_smd.c
@@ -245,7 +245,7 @@
char *packet = req->buf;
unsigned size = req->actual;
unsigned n;
- unsigned count;
+ int count;
n = port->n_read;
if (n) {
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 579899c..e451cee 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -498,7 +498,13 @@
outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
- outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
+ if (format & MDP4_FORMAT_SOLID_FILL) {
+ u32 op_mode = pipe->op_mode;
+ op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
+ op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
+ outpdw(rgb_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
+ } else
+ outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(rgb_base + 0x005c, pipe->phasex_step);
outpdw(rgb_base + 0x0060, pipe->phasey_step);
@@ -670,7 +676,13 @@
outpdw(vg_base + 0x0050, format); /* MDP_RGB_SRC_FORMAT */
outpdw(vg_base + 0x0054, pattern); /* MDP_RGB_SRC_UNPACK_PATTERN */
- outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
+ if (format & MDP4_FORMAT_SOLID_FILL) {
+ u32 op_mode = pipe->op_mode;
+ op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
+ op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
+ outpdw(vg_base + 0x0058, op_mode);/* MDP_RGB_OP_MODE */
+ } else
+ outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
outpdw(vg_base + 0x005c, pipe->phasex_step);
outpdw(vg_base + 0x0060, pipe->phasey_step);
@@ -1494,11 +1506,21 @@
if (pipe->is_fg) {
if (pipe->alpha == 0xff &&
bg_pipe->pipe_type == OVERLAY_TYPE_RGB) {
+ u32 op_mode;
pnum = bg_pipe->pipe_num - OVERLAY_PIPE_RGB1;
rgb_base = MDP_BASE + MDP4_RGB_BASE;
rgb_base += MDP4_RGB_OFF * pnum;
rgb_src_format = inpdw(rgb_base + 0x50);
rgb_src_format |= MDP4_FORMAT_SOLID_FILL;
+ /*
+ * If solid fill is enabled, flip and scale
+ * have to be disabled. otherwise, h/w
+ * underruns.
+ */
+ op_mode = inpdw(rgb_base + 0x0058);
+ op_mode &= ~(MDP4_OP_FLIP_LR + MDP4_OP_SCALEX_EN);
+ op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
+ outpdw(rgb_base + 0x0058, op_mode);
outpdw(rgb_base + 0x50, rgb_src_format);
outpdw(rgb_base + 0x1008, constant_color);
}
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index bacc085..c06a0df 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1180,7 +1180,8 @@
var->xres = panel_info->xres;
var->yres = panel_info->yres;
var->xres_virtual = panel_info->xres;
- var->yres_virtual = panel_info->yres * mfd->fb_page;
+ var->yres_virtual = panel_info->yres * mfd->fb_page +
+ ((PAGE_SIZE - remainder)/fix->line_length) * mfd->fb_page;
var->bits_per_pixel = bpp * 8; /* FrameBuffer color depth */
if (mfd->dest == DISPLAY_LCD) {
var->reserved[4] = panel_info->lcd.refx100 / 100;
@@ -1585,7 +1586,6 @@
static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- int hole_offset;
if (var->rotate != FB_ROTATE_UR)
return -EINVAL;
@@ -1680,20 +1680,7 @@
if (var->xoffset > (var->xres_virtual - var->xres))
return -EINVAL;
- if (!mfd->panel_info.mode2_yres)
- hole_offset = (mfd->fbi->fix.line_length *
- mfd->panel_info.yres) % PAGE_SIZE;
- else
- hole_offset = (mfd->fbi->fix.line_length *
- mfd->panel_info.mode2_yres) % PAGE_SIZE;
-
- if (!hole_offset) {
- hole_offset = PAGE_SIZE - hole_offset;
- hole_offset = hole_offset/mfd->fbi->fix.line_length;
- }
-
- if (var->yoffset > (var->yres_virtual - var->yres + (hole_offset *
- (mfd->fb_page - 1))))
+ if (var->yoffset > (var->yres_virtual - var->yres))
return -EINVAL;
return 0;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
index 1e02a2c..eea902d 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
@@ -164,14 +164,20 @@
DDL_MSG_ERROR("ddl_open:Client_trasac_failed");
return status;
}
- ddl->shared_mem[0].mem_type = DDL_CMD_MEM;
+ if (res_trk_check_for_sec_session())
+ ddl->shared_mem[0].mem_type = DDL_CMD_MEM;
+ else
+ ddl->shared_mem[0].mem_type = DDL_FW_MEM;
ptr = ddl_pmem_alloc(&ddl->shared_mem[0],
DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0);
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
if (!status && ddl_context->frame_channel_depth
== VCD_DUAL_FRAME_COMMAND_CHANNEL) {
- ddl->shared_mem[1].mem_type = DDL_CMD_MEM;
+ if (res_trk_check_for_sec_session())
+ ddl->shared_mem[1].mem_type = DDL_CMD_MEM;
+ else
+ ddl->shared_mem[1].mem_type = DDL_FW_MEM;
ptr = ddl_pmem_alloc(&ddl->shared_mem[1],
DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0);
if (!ptr) {
@@ -289,6 +295,11 @@
DDL_MSG_ERROR("ddl_enc_start:Seq_hdr_alloc_failed");
return VCD_ERR_ALLOC_FAIL;
}
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ encoder->seq_header.alloc_handle,
+ encoder->seq_header.virtual_base_addr,
+ encoder->seq_header.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
if (!ddl_take_command_channel(ddl_context, ddl, client_data))
return VCD_ERR_BUSY;
ddl_vidc_channel_set(ddl);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 75df48d..1da70bc 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -14,7 +14,7 @@
#include <mach/msm_memtypes.h>
#include "vcd_ddl.h"
#include "vcd_ddl_shared_mem.h"
-
+#include "vcd_res_tracker_api.h"
struct ddl_context *ddl_get_context(void)
{
@@ -639,6 +639,7 @@
u32 status = VCD_S_SUCCESS, dpb = 0;
u32 width = 0, height = 0;
u8 *ptr;
+ struct ddl_context *ddl_context = ddl->ddl_context;
dec_bufs = &ddl->codec_data.decoder.hw_bufs;
ddl_calc_dec_hw_buffers_size(ddl->codec_data.decoder.
@@ -649,6 +650,11 @@
DDL_KILO_BYTE(2));
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ dec_bufs->context.alloc_handle,
+ dec_bufs->context.virtual_base_addr,
+ dec_bufs->context.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
}
if (buf_size.sz_nb_ip > 0) {
dec_bufs->h264_nb_ip.mem_type = DDL_MM_MEM;
@@ -726,9 +732,15 @@
DDL_KILO_BYTE(2));
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
- else
+ else {
memset(dec_bufs->desc.align_virtual_addr,
0, buf_size.sz_desc);
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ dec_bufs->desc.alloc_handle,
+ dec_bufs->desc.alloc_handle,
+ dec_bufs->desc.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
+ }
}
if (status)
ddl_free_dec_hw_buffers(ddl);
@@ -830,6 +842,7 @@
struct ddl_enc_buffer_size buf_size;
void *ptr;
u32 status = VCD_S_SUCCESS;
+ struct ddl_context *ddl_context = ddl->ddl_context;
enc_bufs = &ddl->codec_data.encoder.hw_bufs;
enc_bufs->dpb_count = DDL_ENC_MIN_DPB_BUFFERS;
@@ -908,6 +921,11 @@
buf_size.sz_context, DDL_KILO_BYTE(2));
if (!ptr)
status = VCD_ERR_ALLOC_FAIL;
+ msm_ion_do_cache_op(ddl_context->video_ion_client,
+ enc_bufs->context.alloc_handle,
+ enc_bufs->context.virtual_base_addr,
+ enc_bufs->context.buffer_size,
+ ION_IOC_CLEAN_INV_CACHES);
}
if (status)
ddl_free_enc_hw_buffers(ddl);
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
index 7ccf4c2..6aa7451 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
@@ -23,7 +23,6 @@
};
static struct time_data proc_time[MAX_TIME_DATA];
#define DDL_MSG_TIME(x...) printk(KERN_DEBUG x)
-
static unsigned int vidc_mmu_subsystem[] = {
MSM_SUBSYSTEM_VIDEO, MSM_SUBSYSTEM_VIDEO_FWARE};
@@ -37,13 +36,16 @@
#endif
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
- u32 alloc_size, offset = 0, flags = 0;
+ u32 alloc_size, offset = 0 ;
u32 index = 0;
struct ddl_context *ddl_context;
struct msm_mapped_buffer *mapped_buffer = NULL;
- int rc = -EINVAL;
- ion_phys_addr_t phyaddr = 0;
- size_t len = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+ unsigned long *kernel_vaddr = NULL;
+ unsigned long ionflag = 0;
+ unsigned long flags = 0;
+ int ret = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
@@ -61,6 +63,7 @@
__func__);
goto bail_out;
}
+ alloc_size = (alloc_size+4095) & ~4095;
addr->alloc_handle = ion_alloc(
ddl_context->video_ion_client, alloc_size, SZ_4K,
res_trk_get_mem_type());
@@ -69,15 +72,48 @@
__func__);
goto bail_out;
}
- rc = ion_phys(ddl_context->video_ion_client,
- addr->alloc_handle, &phyaddr,
- &len);
- if (rc || !phyaddr) {
+ if (res_trk_check_for_sec_session() ||
+ addr->mem_type == DDL_FW_MEM)
+ ionflag = UNCACHED;
+ else
+ ionflag = CACHED;
+ kernel_vaddr = (unsigned long *) ion_map_kernel(
+ ddl_context->video_ion_client,
+ addr->alloc_handle, ionflag);
+ if (IS_ERR_OR_NULL(kernel_vaddr)) {
+ DDL_MSG_ERROR("%s() :DDL ION map failed\n",
+ __func__);
+ goto free_ion_alloc;
+ }
+ addr->virtual_base_addr = (u8 *) kernel_vaddr;
+ ret = ion_map_iommu(ddl_context->video_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_4K,
+ 0,
+ &iova,
+ &buffer_size,
+ UNCACHED, 0);
+ if (ret) {
+ DDL_MSG_ERROR("%s():DDL ION ion map iommu failed\n",
+ __func__);
+ goto unmap_ion_alloc;
+ }
+ addr->alloced_phys_addr = (phys_addr_t) iova;
+ if (!addr->alloced_phys_addr) {
DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
__func__);
- goto free_acm_ion_alloc;
+ goto unmap_ion_alloc;
}
- addr->alloced_phys_addr = phyaddr;
+ addr->mapped_buffer = NULL;
+ addr->physical_base_addr = (u8 *) iova;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ addr->physical_base_addr, alignment);
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = alloc_size;
} else {
addr->alloced_phys_addr = (phys_addr_t)
allocate_contiguous_memory_nomap(alloc_size,
@@ -87,51 +123,52 @@
__func__, alloc_size);
goto bail_out;
}
- }
- flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
- if (alignment == DDL_KILO_BYTE(128))
- index = 1;
- else if (alignment > SZ_4K)
- flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+ flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
+ if (alignment == DDL_KILO_BYTE(128))
+ index = 1;
+ else if (alignment > SZ_4K)
+ flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
- addr->mapped_buffer =
- msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
- alloc_size, flags, &vidc_mmu_subsystem[index],
- sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
- if (IS_ERR(addr->mapped_buffer)) {
- pr_err(" %s() buffer map failed", __func__);
- goto free_acm_ion_alloc;
+ addr->mapped_buffer =
+ msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
+ alloc_size, flags, &vidc_mmu_subsystem[index],
+ sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
+ if (IS_ERR(addr->mapped_buffer)) {
+ pr_err(" %s() buffer map failed", __func__);
+ goto free_acm_alloc;
+ }
+ mapped_buffer = addr->mapped_buffer;
+ if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
+ pr_err("%s() map buffers failed\n", __func__);
+ goto free_map_buffers;
+ }
+ addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
+ addr->virtual_base_addr = mapped_buffer->vaddr;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ addr->physical_base_addr, alignment);
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = sz;
}
- mapped_buffer = addr->mapped_buffer;
- if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
- pr_err("%s() map buffers failed\n", __func__);
- goto free_map_buffers;
- }
- addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
- addr->virtual_base_addr = mapped_buffer->vaddr;
- addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
- addr->physical_base_addr, alignment);
- offset = (u32)(addr->align_physical_addr -
- addr->physical_base_addr);
- addr->align_virtual_addr = addr->virtual_base_addr + offset;
- addr->buffer_size = sz;
return addr->virtual_base_addr;
-
free_map_buffers:
msm_subsystem_unmap_buffer(addr->mapped_buffer);
addr->mapped_buffer = NULL;
-free_acm_ion_alloc:
- if (ddl_context->video_ion_client) {
- if (addr->alloc_handle) {
- ion_free(ddl_context->video_ion_client,
- addr->alloc_handle);
- addr->alloc_handle = NULL;
- }
- } else {
+free_acm_alloc:
free_contiguous_memory_by_paddr(
(unsigned long)addr->alloced_phys_addr);
addr->alloced_phys_addr = (phys_addr_t)NULL;
- }
+ return NULL;
+unmap_ion_alloc:
+ ion_unmap_kernel(ddl_context->video_ion_client,
+ addr->alloc_handle);
+ addr->virtual_base_addr = NULL;
+ addr->alloced_phys_addr = (phys_addr_t)NULL;
+free_ion_alloc:
+ ion_free(ddl_context->video_ion_client,
+ addr->alloc_handle);
+ addr->alloc_handle = NULL;
bail_out:
return NULL;
}
@@ -146,16 +183,22 @@
}
if (ddl_context->video_ion_client) {
if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
+ ion_unmap_kernel(ddl_context->video_ion_client,
+ addr->alloc_handle);
+ ion_unmap_iommu(ddl_context->video_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(ddl_context->video_ion_client,
addr->alloc_handle);
- }
+ }
} else {
+ if (addr->mapped_buffer)
+ msm_subsystem_unmap_buffer(addr->mapped_buffer);
if (addr->alloced_phys_addr)
free_contiguous_memory_by_paddr(
(unsigned long)addr->alloced_phys_addr);
}
- if (addr->mapped_buffer)
- msm_subsystem_unmap_buffer(addr->mapped_buffer);
memset(addr, 0, sizeof(struct ddl_buf_addr));
}
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index 2f26e01..7e9ac40 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -58,40 +58,88 @@
u32 index = 0;
struct ddl_context *ddl_context;
struct msm_mapped_buffer *mapped_buffer = NULL;
- ddl_context = ddl_get_context();
- if (!addr->alloced_phys_addr) {
- pr_err(" %s() alloced addres NULL", __func__);
- goto bail_out;
- }
- flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
- if (alignment == DDL_KILO_BYTE(128))
- index = 1;
- else if (alignment > SZ_4K)
- flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+ int ret = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+ unsigned long *kernel_vaddr = NULL;
- addr->mapped_buffer =
- msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
- sz, flags, &restrk_mmu_subsystem[index],
- sizeof(restrk_mmu_subsystem[index])/sizeof(unsigned int));
- if (IS_ERR(addr->mapped_buffer)) {
- pr_err(" %s() buffer map failed", __func__);
- goto bail_out;
- }
- mapped_buffer = addr->mapped_buffer;
- if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
- pr_err("%s() map buffers failed\n", __func__);
- goto bail_out;
- }
- addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
- addr->virtual_base_addr = mapped_buffer->vaddr;
- addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ ddl_context = ddl_get_context();
+ if (res_trk_get_enable_ion()) {
+ kernel_vaddr = (unsigned long *) ion_map_kernel(
+ ddl_context->video_ion_client,
+ addr->alloc_handle, UNCACHED);
+ if (IS_ERR_OR_NULL(kernel_vaddr)) {
+ DDL_MSG_ERROR("%s():DDL ION client map failed\n",
+ __func__);
+ goto ion_bail_out;
+ }
+ addr->virtual_base_addr = (u8 *) kernel_vaddr;
+ ret = ion_map_iommu(ddl_context->video_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_FIRMWARE_POOL,
+ SZ_4K,
+ 0,
+ &iova,
+ &buffer_size,
+ UNCACHED, 0);
+ if (ret) {
+ DDL_MSG_ERROR("%s():DDL ION client iommu map failed\n",
+ __func__);
+ goto ion_unmap_bail_out;
+ }
+ addr->mapped_buffer = NULL;
+ addr->physical_base_addr = (u8 *)iova;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
addr->physical_base_addr, alignment);
- offset = (u32)(addr->align_physical_addr -
- addr->physical_base_addr);
- addr->align_virtual_addr = addr->virtual_base_addr + offset;
- addr->buffer_size = sz;
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = buffer_size;
+ } else {
+ if (!addr->alloced_phys_addr) {
+ pr_err(" %s() alloced addres NULL", __func__);
+ goto bail_out;
+ }
+ flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
+ if (alignment == DDL_KILO_BYTE(128))
+ index = 1;
+ else if (alignment > SZ_4K)
+ flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+
+ addr->mapped_buffer =
+ msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
+ sz, flags, &restrk_mmu_subsystem[index],
+ sizeof(restrk_mmu_subsystem[index])/sizeof(unsigned int));
+ if (IS_ERR(addr->mapped_buffer)) {
+ pr_err(" %s() buffer map failed", __func__);
+ goto bail_out;
+ }
+ mapped_buffer = addr->mapped_buffer;
+ if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
+ pr_err("%s() map buffers failed\n", __func__);
+ goto bail_out;
+ }
+ addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
+ addr->virtual_base_addr = mapped_buffer->vaddr;
+ addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
+ addr->physical_base_addr, alignment);
+ offset = (u32)(addr->align_physical_addr -
+ addr->physical_base_addr);
+ addr->align_virtual_addr = addr->virtual_base_addr + offset;
+ addr->buffer_size = sz;
+ }
return addr->virtual_base_addr;
bail_out:
+ if (addr->mapped_buffer)
+ msm_subsystem_unmap_buffer(addr->mapped_buffer);
+ return NULL;
+ion_unmap_bail_out:
+ if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
+ ion_unmap_kernel(resource_context.
+ res_ion_client, addr->alloc_handle);
+ }
+ion_bail_out:
return NULL;
}
@@ -100,9 +148,6 @@
{
u32 alloc_size;
struct ddl_context *ddl_context;
- int rc = -EINVAL;
- ion_phys_addr_t phyaddr = 0;
- size_t len = 0;
DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
if (!addr) {
DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
@@ -120,23 +165,16 @@
__func__);
goto bail_out;
}
+ alloc_size = (alloc_size+4095) & ~4095;
addr->alloc_handle = ion_alloc(
ddl_context->video_ion_client, alloc_size, SZ_4K,
res_trk_get_mem_type());
if (IS_ERR_OR_NULL(addr->alloc_handle)) {
DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
__func__);
- goto bail_out;
- }
- rc = ion_phys(ddl_context->video_ion_client,
- addr->alloc_handle, &phyaddr,
- &len);
- if (rc || !phyaddr) {
- DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
- __func__);
goto free_acm_ion_alloc;
}
- addr->alloced_phys_addr = phyaddr;
+ return (void *) addr->alloc_handle;
} else {
addr->alloced_phys_addr = (phys_addr_t)
allocate_contiguous_memory_nomap(alloc_size,
@@ -146,10 +184,10 @@
__func__, alloc_size);
goto bail_out;
}
+ addr->buffer_size = sz;
+ return (void *)addr->alloced_phys_addr;
}
- addr->buffer_size = sz;
- return (void *)addr->alloced_phys_addr;
free_acm_ion_alloc:
if (ddl_context->video_ion_client) {
@@ -169,7 +207,18 @@
pr_err("%s() invalid args\n", __func__);
return;
}
- if (addr->mapped_buffer)
+ if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
+ if (addr->physical_base_addr) {
+ ion_unmap_kernel(resource_context.res_ion_client,
+ addr->alloc_handle);
+ ion_unmap_iommu(resource_context.res_ion_client,
+ addr->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_FIRMWARE_POOL);
+ addr->virtual_base_addr = NULL;
+ addr->physical_base_addr = NULL;
+ }
+ } else if (addr->mapped_buffer)
msm_subsystem_unmap_buffer(addr->mapped_buffer);
addr->mapped_buffer = NULL;
}
@@ -628,8 +677,8 @@
int mem_type = -1;
switch (resource_context.res_mem_type) {
case DDL_FW_MEM:
- mem_type = resource_context.fw_mem_type;
- break;
+ mem_type = ION_HEAP(resource_context.fw_mem_type);
+ return mem_type;
case DDL_MM_MEM:
mem_type = resource_context.memtype;
break;
@@ -646,7 +695,8 @@
if (res_trk_check_for_sec_session())
mem_type = (ION_HEAP(mem_type) | ION_SECURE);
else
- mem_type = ION_HEAP(mem_type);
+ mem_type = (ION_HEAP(mem_type) |
+ ION_HEAP(ION_IOMMU_HEAP_ID));
}
return mem_type;
}
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index cffb0a9..356a4ae 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -812,7 +812,9 @@
u32 len = 0, flags = 0;
struct file *file;
int rc = 0;
- unsigned long ionflag;
+ unsigned long ionflag = 0;
+ unsigned long buffer_size = 0;
+ unsigned long iova = 0;
if (!client_ctx || !mv_data)
return false;
@@ -839,13 +841,25 @@
return false;
}
put_pmem_file(file);
+ flags = MSM_SUBSYSTEM_MAP_IOVA;
+ mapped_buffer = msm_subsystem_map_buffer(
+ (unsigned long)vcd_h264_mv_buffer->physical_addr, len,
+ flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/
+ sizeof(unsigned int));
+ if (IS_ERR(mapped_buffer)) {
+ pr_err("buffer map failed");
+ return false;
+ }
+ vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
+ vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
client_ctx->h264_mv_ion_handle = ion_import_fd(
client_ctx->user_ion_client,
vcd_h264_mv_buffer->pmem_fd);
if (!client_ctx->h264_mv_ion_handle) {
ERR("%s(): get_ION_handle failed\n", __func__);
- goto ion_error;
+ goto import_ion_error;
}
rc = ion_handle_get_flags(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle,
@@ -853,7 +867,7 @@
if (rc) {
ERR("%s():get_ION_flags fail\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
vcd_h264_mv_buffer->kernel_virtual_addr = (u8 *) ion_map_kernel(
client_ctx->user_ion_client,
@@ -862,29 +876,22 @@
if (!vcd_h264_mv_buffer->kernel_virtual_addr) {
ERR("%s(): get_ION_kernel virtual addr failed\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
- rc = ion_phys(client_ctx->user_ion_client,
+ rc = ion_map_iommu(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle,
- (unsigned long *) (&(vcd_h264_mv_buffer->
- physical_addr)), &len);
+ VIDEO_DOMAIN, VIDEO_MAIN_POOL,
+ SZ_4K, 0, (unsigned long *)&iova,
+ (unsigned long *)&buffer_size, UNCACHED, 0);
if (rc) {
ERR("%s():get_ION_kernel physical addr fail\n",
__func__);
- goto ion_error;
+ goto ion_map_error;
}
+ vcd_h264_mv_buffer->physical_addr = (u8 *) iova;
+ vcd_h264_mv_buffer->client_data = NULL;
+ vcd_h264_mv_buffer->dev_addr = (u8 *) iova;
}
- flags = MSM_SUBSYSTEM_MAP_IOVA;
- mapped_buffer = msm_subsystem_map_buffer(
- (unsigned long)vcd_h264_mv_buffer->physical_addr, len,
- flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(mapped_buffer)) {
- pr_err("buffer map failed");
- return false;
- }
- vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
- vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
DBG("Virt: %p, Phys %p, fd: %d", vcd_h264_mv_buffer->
kernel_virtual_addr, vcd_h264_mv_buffer->physical_addr,
vcd_h264_mv_buffer->pmem_fd);
@@ -896,13 +903,14 @@
return false;
else
return true;
-ion_error:
+ion_map_error:
if (vcd_h264_mv_buffer->kernel_virtual_addr)
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
if (client_ctx->h264_mv_ion_handle)
ion_free(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
+import_ion_error:
return false;
}
@@ -973,6 +981,10 @@
if (client_ctx->h264_mv_ion_handle != NULL) {
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ client_ctx->h264_mv_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
}
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index cac5dc4..37f1001 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -1766,10 +1766,11 @@
struct vcd_property_hdr vcd_property_hdr;
struct vcd_property_enc_recon_buffer *control = NULL;
struct msm_mapped_buffer *mapped_buffer = NULL;
- size_t ion_len = -1;
- unsigned long phy_addr;
int rc = -1;
- unsigned long ionflag;
+ unsigned long ionflag = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+
if (!client_ctx || !venc_recon) {
pr_err("%s() Invalid params", __func__);
return false;
@@ -1802,12 +1803,23 @@
return false;
}
put_pmem_file(file);
+ flags = MSM_SUBSYSTEM_MAP_IOVA;
+ mapped_buffer = msm_subsystem_map_buffer(
+ (unsigned long)control->physical_addr, len,
+ flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
+ if (IS_ERR(mapped_buffer)) {
+ pr_err("buffer map failed");
+ return false;
+ }
+ control->client_data = (void *) mapped_buffer;
+ control->dev_addr = (u8 *)mapped_buffer->iova[0];
} else {
client_ctx->recon_buffer_ion_handle[i] = ion_import_fd(
client_ctx->user_ion_client, control->pmem_fd);
if (IS_ERR_OR_NULL(client_ctx->recon_buffer_ion_handle[i])) {
ERR("%s(): get_ION_handle failed\n", __func__);
- goto ion_error;
+ goto import_ion_error;
}
rc = ion_handle_get_flags(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i],
@@ -1815,7 +1827,7 @@
if (rc) {
ERR("%s():get_ION_flags fail\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
control->kernel_virtual_addr = (u8 *) ion_map_kernel(
client_ctx->user_ion_client,
@@ -1824,30 +1836,27 @@
if (!control->kernel_virtual_addr) {
ERR("%s(): get_ION_kernel virtual addr fail\n",
__func__);
- goto ion_error;
+ goto import_ion_error;
}
- rc = ion_phys(client_ctx->user_ion_client,
+ rc = ion_map_iommu(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i],
- &phy_addr, &ion_len);
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_4K,
+ 0,
+ (unsigned long *)&iova,
+ (unsigned long *)&buffer_size,
+ UNCACHED, 0);
if (rc) {
- ERR("%s():get_ION_kernel physical addr fail\n",
+ ERR("%s():ION map iommu addr fail\n",
__func__);
- goto ion_error;
+ goto map_ion_error;
}
- control->physical_addr = (u8 *) phy_addr;
- len = (unsigned long) ion_len;
+ control->physical_addr = (u8 *) iova;
+ len = buffer_size;
+ control->client_data = NULL;
+ control->dev_addr = (u8 *)iova;
}
- flags = MSM_SUBSYSTEM_MAP_IOVA;
- mapped_buffer = msm_subsystem_map_buffer(
- (unsigned long)control->physical_addr, len,
- flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(mapped_buffer)) {
- pr_err("buffer map failed");
- return false;
- }
- control->client_data = (void *) mapped_buffer;
- control->dev_addr = (u8 *)mapped_buffer->iova[0];
vcd_property_hdr.prop_id = VCD_I_RECON_BUFFERS;
vcd_property_hdr.sz =
@@ -1863,7 +1872,7 @@
__func__, vcd_status);
return false;
}
-ion_error:
+map_ion_error:
if (control->kernel_virtual_addr)
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
@@ -1871,6 +1880,7 @@
ion_free(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
client_ctx->recon_buffer_ion_handle[i] = NULL;
+import_ion_error:
return false;
}
@@ -1914,6 +1924,10 @@
if (client_ctx->recon_buffer_ion_handle[i]) {
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ client_ctx->recon_buffer_ion_handle[i],
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
client_ctx->recon_buffer_ion_handle[i]);
client_ctx->recon_buffer_ion_handle[i] = NULL;
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index 0ea64d4..cd128dd 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -415,6 +415,10 @@
if (buf_addr_table[i].buff_ion_handle) {
ion_unmap_kernel(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ buf_addr_table[i].buff_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
buf_addr_table[i].buff_ion_handle = NULL;
@@ -428,6 +432,10 @@
if (client_ctx->h264_mv_ion_handle) {
ion_unmap_kernel(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ client_ctx->h264_mv_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
client_ctx->h264_mv_ion_handle);
client_ctx->h264_mv_ion_handle = NULL;
@@ -530,9 +538,11 @@
u32 i, flags;
struct buf_addr_table *buf_addr_table;
struct msm_mapped_buffer *mapped_buffer = NULL;
- size_t ion_len;
struct ion_handle *buff_ion_handle = NULL;
unsigned long ionflag = 0;
+ unsigned long iova = 0;
+ int ret = 0;
+ unsigned long buffer_size = 0;
if (!client_ctx || !length)
return false;
@@ -548,6 +558,7 @@
num_of_buffers = &client_ctx->num_of_output_buffers;
DBG("%s(): buffer = OUTPUT #Buf = %d\n",
__func__, *num_of_buffers);
+ length = length * 2; /* workaround for iommu video h/w bug */
}
if (*num_of_buffers == max_num_buffers) {
@@ -573,6 +584,20 @@
goto bail_out_add;
}
put_pmem_file(file);
+ flags = (buffer == BUFFER_TYPE_INPUT)
+ ? MSM_SUBSYSTEM_MAP_IOVA :
+ MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K;
+ mapped_buffer = msm_subsystem_map_buffer(phys_addr,
+ length, flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
+ if (IS_ERR(mapped_buffer)) {
+ pr_err("buffer map failed");
+ goto bail_out_add;
+ }
+ buf_addr_table[*num_of_buffers].client_data = (void *)
+ mapped_buffer;
+ buf_addr_table[*num_of_buffers].dev_addr =
+ mapped_buffer->iova[0];
} else {
buff_ion_handle = ion_import_fd(
client_ctx->user_ion_client, pmem_fd);
@@ -586,7 +611,7 @@
&ionflag)) {
ERR("%s():ION flags fail\n",
__func__);
- goto ion_error;
+ goto bail_out_add;
}
*kernel_vaddr = (unsigned long)
ion_map_kernel(
@@ -597,32 +622,28 @@
ERR("%s():ION virtual addr fail\n",
__func__);
*kernel_vaddr = (unsigned long)NULL;
- goto ion_error;
+ goto ion_free_error;
}
- if (ion_phys(client_ctx->user_ion_client,
+ ret = ion_map_iommu(client_ctx->user_ion_client,
buff_ion_handle,
- &phys_addr, &ion_len)) {
- ERR("%s():ION physical addr fail\n",
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_8K,
+ length,
+ (unsigned long *) &iova,
+ (unsigned long *) &buffer_size,
+ UNCACHED, ION_IOMMU_UNMAP_DELAYED);
+ if (ret) {
+ ERR("%s():ION iommu map fail\n",
__func__);
- goto ion_error;
+ goto ion_map_error;
}
- len = (unsigned long) ion_len;
+ phys_addr = iova;
+ buf_addr_table[*num_of_buffers].client_data = NULL;
+ buf_addr_table[*num_of_buffers].dev_addr = iova;
}
phys_addr += buffer_addr_offset;
(*kernel_vaddr) += buffer_addr_offset;
- flags = (buffer == BUFFER_TYPE_INPUT) ? MSM_SUBSYSTEM_MAP_IOVA :
- MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K;
- mapped_buffer = msm_subsystem_map_buffer(phys_addr, length,
- flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(mapped_buffer)) {
- pr_err("buffer map failed");
- goto ion_error;
- }
- buf_addr_table[*num_of_buffers].client_data = (void *)
- mapped_buffer;
- buf_addr_table[*num_of_buffers].dev_addr =
- mapped_buffer->iova[0];
buf_addr_table[*num_of_buffers].user_vaddr = user_vaddr;
buf_addr_table[*num_of_buffers].kernel_vaddr = *kernel_vaddr;
buf_addr_table[*num_of_buffers].pmem_fd = pmem_fd;
@@ -640,9 +661,10 @@
}
mutex_unlock(&client_ctx->enrty_queue_lock);
return true;
-ion_error:
+ion_map_error:
if (*kernel_vaddr && buff_ion_handle)
ion_unmap_kernel(client_ctx->user_ion_client, buff_ion_handle);
+ion_free_error:
if (!IS_ERR_OR_NULL(buff_ion_handle))
ion_free(client_ctx->user_ion_client, buff_ion_handle);
bail_out_add:
@@ -685,12 +707,19 @@
__func__, client_ctx, user_vaddr);
goto bail_out_del;
}
- msm_subsystem_unmap_buffer(
- (struct msm_mapped_buffer *)buf_addr_table[i].client_data);
+ if (buf_addr_table[i].client_data) {
+ msm_subsystem_unmap_buffer(
+ (struct msm_mapped_buffer *)buf_addr_table[i].client_data);
+ buf_addr_table[i].client_data = NULL;
+ }
*kernel_vaddr = buf_addr_table[i].kernel_vaddr;
if (buf_addr_table[i].buff_ion_handle) {
ion_unmap_kernel(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
+ ion_unmap_iommu(client_ctx->user_ion_client,
+ buf_addr_table[i].buff_ion_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(client_ctx->user_ion_client,
buf_addr_table[i].buff_ion_handle);
buf_addr_table[i].buff_ion_handle = NULL;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index ebc30fd..b5f643f 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -36,9 +36,10 @@
u32 memtype, i = 0, flags = 0;
struct vcd_msm_map_buffer *map_buffer = NULL;
struct msm_mapped_buffer *mapped_buffer = NULL;
- int rc = 0;
- ion_phys_addr_t phyaddr = 0;
- size_t len = 0;
+ unsigned long iova = 0;
+ unsigned long buffer_size = 0;
+ int ret = 0;
+ unsigned long ionflag = 0;
if (!kernel_vaddr || !phy_addr || !cctxt) {
pr_err("\n%s: Invalid parameters", __func__);
@@ -66,6 +67,22 @@
pr_err("%s() acm alloc failed", __func__);
goto free_map_table;
}
+ flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
+ map_buffer->mapped_buffer =
+ msm_subsystem_map_buffer((unsigned long)map_buffer->phy_addr,
+ sz, flags, vidc_mmu_subsystem,
+ sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
+ if (IS_ERR(map_buffer->mapped_buffer)) {
+ pr_err(" %s() buffer map failed", __func__);
+ goto free_acm_alloc;
+ }
+ mapped_buffer = map_buffer->mapped_buffer;
+ if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
+ pr_err("%s() map buffers failed", __func__);
+ goto free_map_buffers;
+ }
+ *phy_addr = (u8 *) mapped_buffer->iova[0];
+ *kernel_vaddr = (u8 *) mapped_buffer->vaddr;
} else {
map_buffer->alloc_handle = ion_alloc(
cctxt->vcd_ion_client, sz, SZ_4K,
@@ -74,48 +91,58 @@
pr_err("%s() ION alloc failed", __func__);
goto bailout;
}
- rc = ion_phys(cctxt->vcd_ion_client,
- map_buffer->alloc_handle, &phyaddr, &len);
- if (rc) {
- pr_err("%s() : ION client physical fail\n",
- __func__);
- goto free_acm_alloc;
+ if (ion_handle_get_flags(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ &ionflag)) {
+ pr_err("%s() ION get flag failed", __func__);
+ goto bailout;
}
- map_buffer->phy_addr = phyaddr;
+ *kernel_vaddr = (u8 *) ion_map_kernel(
+ cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ ionflag);
+ if (!(*kernel_vaddr)) {
+ pr_err("%s() ION map failed", __func__);
+ goto ion_free_bailout;
+ }
+ ret = ion_map_iommu(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL,
+ SZ_4K,
+ 0,
+ (unsigned long *)&iova,
+ (unsigned long *)&buffer_size,
+ UNCACHED, 0);
+ if (ret) {
+ pr_err("%s() ION iommu map failed", __func__);
+ goto ion_map_bailout;
+ }
+ map_buffer->phy_addr = iova;
if (!map_buffer->phy_addr) {
pr_err("%s() acm alloc failed", __func__);
goto free_map_table;
}
-
+ *phy_addr = (u8 *)iova;
+ mapped_buffer = NULL;
+ map_buffer->mapped_buffer = NULL;
}
- flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
- map_buffer->mapped_buffer =
- msm_subsystem_map_buffer((unsigned long)map_buffer->phy_addr,
- sz, flags, vidc_mmu_subsystem,
- sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
- if (IS_ERR(map_buffer->mapped_buffer)) {
- pr_err(" %s() buffer map failed", __func__);
- goto free_acm_alloc;
- }
- mapped_buffer = map_buffer->mapped_buffer;
- if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
- pr_err("%s() map buffers failed", __func__);
- goto free_map_buffers;
- }
- *phy_addr = (u8 *) mapped_buffer->iova[0];
- *kernel_vaddr = (u8 *) mapped_buffer->vaddr;
return 0;
free_map_buffers:
- msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
+ if (map_buffer->mapped_buffer)
+ msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
free_acm_alloc:
if (!cctxt->vcd_enable_ion) {
free_contiguous_memory_by_paddr(
(unsigned long)map_buffer->phy_addr);
- } else {
- ion_free(cctxt->vcd_ion_client, map_buffer->alloc_handle);
}
+ return -ENOMEM;
+ion_map_bailout:
+ ion_unmap_kernel(cctxt->vcd_ion_client, map_buffer->alloc_handle);
+ion_free_bailout:
+ ion_free(cctxt->vcd_ion_client, map_buffer->alloc_handle);
free_map_table:
map_buffer->in_use = 0;
bailout:
@@ -145,9 +172,16 @@
pr_err("%s() Entry not found", __func__);
goto bailout;
}
- msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
+ if (map_buffer->mapped_buffer)
+ msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
if (cctxt->vcd_enable_ion) {
if (map_buffer->alloc_handle) {
+ ion_unmap_kernel(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle);
+ ion_unmap_iommu(cctxt->vcd_ion_client,
+ map_buffer->alloc_handle,
+ VIDEO_DOMAIN,
+ VIDEO_MAIN_POOL);
ion_free(cctxt->vcd_ion_client,
map_buffer->alloc_handle);
}
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 5d3a6a1..15737d6 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -679,7 +679,7 @@
/* LOG CODES */
#define LOG_0 0x0
-#define LOG_1 0x1520
+#define LOG_1 0x15A7
#define LOG_2 0x0
#define LOG_3 0x0
#define LOG_4 0x4910
diff --git a/include/linux/mfd/pm8xxx/pm8921-charger.h b/include/linux/mfd/pm8xxx/pm8921-charger.h
index 31af535..8a3b999 100644
--- a/include/linux/mfd/pm8xxx/pm8921-charger.h
+++ b/include/linux/mfd/pm8xxx/pm8921-charger.h
@@ -100,6 +100,10 @@
* VBAT_THERM goes below 35% of VREF_THERM, if low the
* battery will be considered hot when VBAT_THERM goes
* below 25% of VREF_THERM. Hardware defaults to low.
+ * @rconn_mohm: resistance in milliOhm from the vbat sense to ground
+ * with the battery terminals shorted. This indicates
+ * resistance of the pads, connectors, battery terminals
+ * and rsense.
*/
struct pm8921_charger_platform_data {
struct pm8xxx_charger_core_data charger_cdata;
@@ -131,6 +135,7 @@
int thermal_levels;
enum pm8921_chg_cold_thr cold_thr;
enum pm8921_chg_hot_thr hot_thr;
+ int rconn_mohm;
};
enum pm8921_charger_source {