Merge "msm: rpm: Correct MSM8930+PM8038 resource mapping"
diff --git a/arch/arm/boot/dts/msm8226.dtsi b/arch/arm/boot/dts/msm8226.dtsi
index b900c3f..ebd0ea3 100644
--- a/arch/arm/boot/dts/msm8226.dtsi
+++ b/arch/arm/boot/dts/msm8226.dtsi
@@ -83,6 +83,74 @@
qcom,ipi-ping = <1>;
};
+ qcom,smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>,
+ <0xfa006000 0x1000>,
+ <0xfc428000 0x4000>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ qcom,pil-string = "modem";
+ interrupts = <0 25 1>;
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <1>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x100>;
+ qcom,pil-string = "adsp";
+ interrupts = <0 156 1>;
+ };
+
+ qcom,smsm-adsp {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <1>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x200>;
+ interrupts = <0 157 1>;
+ };
+
+ qcom,smd-wcnss {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <6>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x20000>;
+ qcom,pil-string = "wcnss";
+ interrupts = <0 142 1>;
+ };
+
+ qcom,smsm-wcnss {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <6>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x80000>;
+ interrupts = <0 144 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ qcom,irq-no-suspend;
+ };
+ };
+
};
&gdsc_venus {
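The qcom,smd-irq-offset and qcom,smd-irq-bitmask pairs above describe how each edge raises its outgoing IPC interrupt, while the interrupts property is the incoming IRQ from the remote processor. A minimal sketch of how a driver would be expected to consume the two properties, assuming the usual write-bitmask-to-IPC-register scheme; the function name below is illustrative and not part of this patch:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative only: raise the outgoing SMD/SMSM interrupt for one edge by
 * writing its bitmask to the IPC register at irq-reg-base + irq-offset,
 * e.g. the modem edge above: 0x1000 written at 0xfa006000 + 0x8.
 */
static void smd_notify_remote_sketch(void __iomem *irq_reg_base,
				     u32 irq_offset, u32 irq_bitmask)
{
	wmb();	/* order SMEM updates before raising the interrupt */
	__raw_writel(irq_bitmask, irq_reg_base + irq_offset);
}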
diff --git a/arch/arm/configs/msm8910_defconfig b/arch/arm/configs/msm8910_defconfig
index 72c2969..de78559 100644
--- a/arch/arm/configs/msm8910_defconfig
+++ b/arch/arm/configs/msm8910_defconfig
@@ -128,6 +128,8 @@
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=m
CONFIG_MMC_MSM=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_STAGING=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index e4b60ff..8f82962 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -47,6 +47,7 @@
CONFIG_MSM_BAM_DMUX=y
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_IPC_ROUTER_SECURITY=y
CONFIG_MSM_QMI_INTERFACE=y
# CONFIG_MSM_HW3D is not set
CONFIG_MSM_SUBSYSTEM_RESTART=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 0042406..ade2e66 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -46,6 +46,7 @@
CONFIG_MSM_BAM_DMUX=y
CONFIG_MSM_IPC_ROUTER=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
+CONFIG_MSM_IPC_ROUTER_SECURITY=y
CONFIG_MSM_QMI_INTERFACE=y
# CONFIG_MSM_HW3D is not set
CONFIG_MSM_SUBSYSTEM_RESTART=y
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index 5c1d3e1..8174370 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -241,20 +241,127 @@
static struct acpu_level tbl_PVS0_1700MHz[] __initdata = {
{ 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
- { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 975000 },
- { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 1000000 },
- { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 1025000 },
- { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 1075000 },
- { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 1100000 },
- { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 1125000 },
- { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1175000 },
- { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1200000 },
- { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1225000 },
- { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1237500 },
- { 1, { 1512000, HFPLL, 1, 0x38 }, L2(15), 1250000 },
- { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1250000 },
- { 1, { 1620000, HFPLL, 1, 0x3C }, L2(15), 1250000 },
- { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1250000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 950000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 950000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 962500 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 1000000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 1025000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 1037500 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1075000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1087500 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1125000 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1150000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1175000 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1225000 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1250000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS1_1700MHz[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 950000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 950000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 950000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 962500 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 975000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 1000000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 1012500 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1037500 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1050000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1087500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1112500 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1150000 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1187500 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1200000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS2_1700MHz[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 925000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 925000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 925000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 925000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 937500 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 950000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 975000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 1000000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 1012500 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1037500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1075000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1100000 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1137500 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1162500 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS3_1700MHz[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 900000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 900000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 900000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 900000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 900000 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 925000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 950000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 975000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 987500 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 1000000 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1037500 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1062500 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1100000 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1125000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS4_1700MHz[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 875000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 875000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 875000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 887500 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 900000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 925000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 950000 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 962500 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 975000 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 1000000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1037500 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1075000 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1100000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS5_1700MHz[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 875000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 875000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 875000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 887500 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 900000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 925000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 937500 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 950000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 962500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 987500 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1012500 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1050000 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1075000 },
+ { 0, { 0 } }
+};
+
+static struct acpu_level tbl_PVS6_1700MHz[] __initdata = {
+ { 1, { 384000, PLL_8, 0, 0x00 }, L2(0), 875000 },
+ { 1, { 486000, HFPLL, 2, 0x24 }, L2(6), 875000 },
+ { 1, { 594000, HFPLL, 1, 0x16 }, L2(6), 875000 },
+ { 1, { 702000, HFPLL, 1, 0x1A }, L2(6), 875000 },
+ { 1, { 810000, HFPLL, 1, 0x1E }, L2(6), 887500 },
+ { 1, { 918000, HFPLL, 1, 0x22 }, L2(6), 900000 },
+ { 1, { 1026000, HFPLL, 1, 0x26 }, L2(6), 925000 },
+ { 1, { 1134000, HFPLL, 1, 0x2A }, L2(15), 937500 },
+ { 1, { 1242000, HFPLL, 1, 0x2E }, L2(15), 950000 },
+ { 1, { 1350000, HFPLL, 1, 0x32 }, L2(15), 962500 },
+ { 1, { 1458000, HFPLL, 1, 0x36 }, L2(15), 975000 },
+ { 1, { 1566000, HFPLL, 1, 0x3A }, L2(15), 1000000 },
+ { 1, { 1674000, HFPLL, 1, 0x3E }, L2(15), 1025000 },
+ { 1, { 1728000, HFPLL, 1, 0x40 }, L2(15), 1050000 },
{ 0, { 0 } }
};
@@ -398,12 +505,12 @@
[0][PVS_FASTER] = {tbl_faster, sizeof(tbl_faster), 25000 },
[1][0] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
- [1][1] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
- [1][2] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
- [1][3] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
- [1][4] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
- [1][5] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
- [1][6] = { tbl_PVS0_1700MHz, sizeof(tbl_PVS0_1700MHz), 0 },
+ [1][1] = { tbl_PVS1_1700MHz, sizeof(tbl_PVS1_1700MHz), 0 },
+ [1][2] = { tbl_PVS2_1700MHz, sizeof(tbl_PVS2_1700MHz), 0 },
+ [1][3] = { tbl_PVS3_1700MHz, sizeof(tbl_PVS3_1700MHz), 0 },
+ [1][4] = { tbl_PVS4_1700MHz, sizeof(tbl_PVS4_1700MHz), 0 },
+ [1][5] = { tbl_PVS5_1700MHz, sizeof(tbl_PVS5_1700MHz), 0 },
+ [1][6] = { tbl_PVS6_1700MHz, sizeof(tbl_PVS6_1700MHz), 0 },
[2][0] = { tbl_PVS0_2000MHz, sizeof(tbl_PVS0_2000MHz), 0 },
[2][1] = { tbl_PVS1_2000MHz, sizeof(tbl_PVS1_2000MHz), 0 },
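The practical effect of the per-bin tables: previously every 1.7 GHz PVS bin fell back to tbl_PVS0_1700MHz, so a part fused as PVS 3, for example, ran 1.242 GHz at the old PVS0 value of 1.2 V. With the tables above the same part selects tbl_PVS3_1700MHz and runs 1.242 GHz at 987.5 mV, and every bin also gains the 1.728 GHz step.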
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index f9d1533..f6dd2ea 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -405,7 +405,7 @@
.resume_charge_percent = 99,
.term_current = CHG_TERM_MA,
.cool_temp = 10,
- .warm_temp = 40,
+ .warm_temp = 45,
.temp_check_period = 1,
.max_bat_chg_current = 1100,
.cool_bat_chg_current = 350,
diff --git a/arch/arm/mach-msm/board-8910.c b/arch/arm/mach-msm/board-8910.c
index 42fe1ea..b4a95ee 100644
--- a/arch/arm/mach-msm/board-8910.c
+++ b/arch/arm/mach-msm/board-8910.c
@@ -69,6 +69,7 @@
CLK_DUMMY("iface_clk", NULL, "msm_sdcc.2", OFF),
CLK_DUMMY("core_clk", NULL, "msm_sdcc.2", OFF),
CLK_DUMMY("bus_clk", NULL, "msm_sdcc.2", OFF),
+ CLK_DUMMY("dfab_clk", DFAB_CLK, "msm_sps", OFF),
};
static struct clock_init_data msm_dummy_clock_init_data __initdata = {
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index dd4b67e..a5fded4 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -325,7 +325,7 @@
.resume_charge_percent = 99,
.term_current = CHG_TERM_MA,
.cool_temp = 10,
- .warm_temp = 40,
+ .warm_temp = 45,
.temp_check_period = 1,
.max_bat_chg_current = 1100,
.cool_bat_chg_current = 350,
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 95a157a..f0ba1c9 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -406,7 +406,7 @@
.resume_charge_percent = 99,
.term_current = CHG_TERM_MA,
.cool_temp = 10,
- .warm_temp = 40,
+ .warm_temp = 45,
.temp_check_period = 1,
.max_bat_chg_current = 1100,
.cool_bat_chg_current = 350,
@@ -615,6 +615,6 @@
pm8921_bms_pdata.rconn_mohm = 20;
if (!machine_is_msm8960_fluid() && !machine_is_msm8960_liquid()
- && !machine_is_msm8960_fluid())
+ && !machine_is_msm8960_mtp())
pm8921_chg_pdata.battery_less_hardware = 1;
}
diff --git a/arch/arm/mach-msm/board-msm7627a-io.c b/arch/arm/mach-msm/board-msm7627a-io.c
index 2983dc0..8c52456 100644
--- a/arch/arm/mach-msm/board-msm7627a-io.c
+++ b/arch/arm/mach-msm/board-msm7627a-io.c
@@ -727,6 +727,15 @@
[KP_INDEX_SKU3(1, 1)] = KEY_CAMERA,
};
+static unsigned int kp_row_gpios_skud[] = {31, 32};
+static unsigned int kp_col_gpios_skud[] = {37};
+
+static const unsigned short keymap_skud[] = {
+ [KP_INDEX_SKU3(0, 0)] = KEY_VOLUMEUP,
+ [KP_INDEX_SKU3(0, 1)] = KEY_VOLUMEDOWN,
+};
+
+
static struct gpio_event_matrix_info kp_matrix_info_sku3 = {
.info.func = gpio_event_matrix_func,
.keymap = keymap_sku3,
@@ -887,13 +896,23 @@
#endif
/* keypad */
+
+ if (machine_is_qrd_skud_prime()) {
+ kp_matrix_info_sku3.keymap = keymap_skud;
+ kp_matrix_info_sku3.output_gpios = kp_row_gpios_skud;
+ kp_matrix_info_sku3.input_gpios = kp_col_gpios_skud;
+ kp_matrix_info_sku3.noutputs = ARRAY_SIZE(kp_row_gpios_skud);
+ kp_matrix_info_sku3.ninputs = ARRAY_SIZE(kp_col_gpios_skud);
+ }
+
if (machine_is_msm8625_evt())
kp_matrix_info_8625.keymap = keymap_8625_evt;
if (machine_is_msm7627a_evb() || machine_is_msm8625_evb() ||
machine_is_msm8625_evt())
platform_device_register(&kp_pdev_8625);
- else if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7())
+ else if (machine_is_msm7627a_qrd3() || machine_is_msm8625_qrd7()
+ || machine_is_qrd_skud_prime())
platform_device_register(&kp_pdev_sku3);
/* leds */
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index b81832e..aef4ac9 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -300,6 +300,7 @@
if (ret)
pr_err("%s: Graceful shutdown of the external modem failed, ret = %d\n",
__func__, ret);
+ put_user(ret, (unsigned long __user *) arg);
break;
default:
pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
diff --git a/arch/arm/mach-msm/peripheral-loader.c b/arch/arm/mach-msm/peripheral-loader.c
index 65e05a9..e3a3563 100644
--- a/arch/arm/mach-msm/peripheral-loader.c
+++ b/arch/arm/mach-msm/peripheral-loader.c
@@ -382,6 +382,7 @@
if (priv->region)
ion_free(ion, priv->region);
+ priv->region = NULL;
list_for_each_entry_safe(p, tmp, &priv->segs, list) {
list_del(&p->list);
kfree(p);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index e61b040..0b445d6 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -323,9 +323,19 @@
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
-static
-void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
- struct kgsl_process_private *process)
+/**
+ * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree
+ * @process: the process that owns the memory
+ * @entry: the memory entry
+ *
+ * Insert a kgsl_mem_entry in to the rb_tree for searching by GPU address.
+ * Not all mem_entries will have gpu addresses when first created, so this
+ * function may be called after creation when the GPU address is finally
+ * assigned.
+ */
+static void
+kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
+ struct kgsl_mem_entry *entry)
{
struct rb_node **node;
struct rb_node *parent = NULL;
@@ -350,8 +360,48 @@
rb_insert_color(&entry->node, &process->mem_rb);
spin_unlock(&process->mem_lock);
+}
+/**
+ * kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
+ * @entry: the memory entry
+ * @process: the owner process
+ *
+ * Attach a newly created mem_entry to its owner process so that
+ * it can be found later. The mem_entry will be added to mem_idr and have
+ * its 'id' field assigned. If the GPU address has been set, the entry
+ * will also be added to the mem_rb tree.
+ *
+ * @returns - 0 on success or error code on failure.
+ */
+static int
+kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
+ struct kgsl_process_private *process)
+{
+ int ret;
+
+ while (1) {
+ if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock(&process->mem_lock);
+ ret = idr_get_new_above(&process->mem_idr, entry, 1,
+ &entry->id);
+ spin_unlock(&process->mem_lock);
+
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto err;
+ }
entry->priv = process;
+
+ if (entry->memdesc.gpuaddr != 0)
+ kgsl_mem_entry_track_gpuaddr(process, entry);
+err:
+ return ret;
}
/* Detach a memory entry from a process and unmap it from the MMU */
@@ -361,6 +411,17 @@
if (entry == NULL)
return;
+ spin_lock(&entry->priv->mem_lock);
+
+ if (entry->id != 0)
+ idr_remove(&entry->priv->mem_idr, entry->id);
+ entry->id = 0;
+
+ if (entry->memdesc.gpuaddr != 0)
+ rb_erase(&entry->node, &entry->priv->mem_rb);
+
+ spin_unlock(&entry->priv->mem_lock);
+
entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
entry->priv = NULL;
@@ -777,6 +838,8 @@
private->pid = task_tgid_nr(current);
private->mem_rb = RB_ROOT;
+ idr_init(&private->mem_idr);
+
if (kgsl_mmu_enabled())
{
unsigned long pt_name;
@@ -805,7 +868,7 @@
struct kgsl_process_private *private)
{
struct kgsl_mem_entry *entry = NULL;
- struct rb_node *node;
+ int next = 0;
if (!private)
return;
@@ -820,14 +883,22 @@
list_del(&private->list);
- for (node = rb_first(&private->mem_rb); node; ) {
- entry = rb_entry(node, struct kgsl_mem_entry, node);
- node = rb_next(&entry->node);
-
- rb_erase(&entry->node, &private->mem_rb);
+ while (1) {
+ rcu_read_lock();
+ entry = idr_get_next(&private->mem_idr, &next);
+ rcu_read_unlock();
+ if (entry == NULL)
+ break;
kgsl_mem_entry_detach_process(entry);
+ /*
+ * Always start back at the beginning, to
+ * ensure all entries are removed,
+ * like list_for_each_entry_safe.
+ */
+ next = 0;
}
kgsl_mmu_putpagetable(private->pagetable);
+ idr_destroy(&private->mem_idr);
kfree(private);
unlock:
mutex_unlock(&kgsl_driver.process_mutex);
@@ -1006,6 +1077,25 @@
return kgsl_sharedmem_find_region(private, gpuaddr, 1);
}
+/**
+ * kgsl_sharedmem_find_id - find a memory entry by id
+ * @process: the owning process
+ * @id: id to find
+ *
+ * @returns - the mem_entry or NULL
+ */
+static inline struct kgsl_mem_entry *
+kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
+{
+ struct kgsl_mem_entry *entry;
+
+ rcu_read_lock();
+ entry = idr_find(&process->mem_idr, id);
+ rcu_read_unlock();
+
+ return entry;
+}
+
/*call all ioctl sub functions with driver locked*/
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
@@ -1293,9 +1383,6 @@
void *priv, u32 id, u32 timestamp)
{
struct kgsl_mem_entry *entry = priv;
- spin_lock(&entry->priv->mem_lock);
- rb_erase(&entry->node, &entry->priv->mem_rb);
- spin_unlock(&entry->priv->mem_lock);
trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
kgsl_mem_entry_detach_process(entry);
}
@@ -1407,34 +1494,52 @@
static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
- int result = 0;
struct kgsl_sharedmem_free *param = data;
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_mem_entry *entry = NULL;
spin_lock(&private->mem_lock);
entry = kgsl_sharedmem_find(private, param->gpuaddr);
- if (entry)
- rb_erase(&entry->node, &private->mem_rb);
-
spin_unlock(&private->mem_lock);
- if (entry) {
- trace_kgsl_mem_free(entry);
-
- kgsl_memfree_hist_set_event(
- entry->priv->pid,
- entry->memdesc.gpuaddr,
- entry->memdesc.size,
- entry->memdesc.flags);
-
- kgsl_mem_entry_detach_process(entry);
- } else {
- KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
- result = -EINVAL;
+ if (!entry) {
+ KGSL_MEM_INFO(dev_priv->device, "invalid gpuaddr %08x\n",
+ param->gpuaddr);
+ return -EINVAL;
}
+ trace_kgsl_mem_free(entry);
- return result;
+ kgsl_memfree_hist_set_event(entry->priv->pid,
+ entry->memdesc.gpuaddr,
+ entry->memdesc.size,
+ entry->memdesc.flags);
+
+ kgsl_mem_entry_detach_process(entry);
+ return 0;
+}
+
+static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_gpumem_free_id *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ entry = kgsl_sharedmem_find_id(private, param->id);
+
+ if (!entry) {
+ KGSL_MEM_INFO(dev_priv->device, "invalid id %d\n", param->id);
+ return -EINVAL;
+ }
+ trace_kgsl_mem_free(entry);
+
+ kgsl_memfree_hist_set_event(entry->priv->pid,
+ entry->memdesc.gpuaddr,
+ entry->memdesc.size,
+ entry->memdesc.flags);
+
+ kgsl_mem_entry_detach_process(entry);
+ return 0;
}
static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
@@ -1798,6 +1903,16 @@
else
memtype = param->memtype;
+ /*
+ * Mask off unknown flags from userspace. This way the caller can
+ * check if a flag is supported by looking at the returned flags.
+ * Note: CACHEMODE is ignored for this call. Caching should be
+ * determined by type of allocation being mapped.
+ */
+ param->flags &= KGSL_MEMFLAGS_GPUREADONLY
+ | KGSL_MEMTYPE_MASK
+ | KGSL_MEMALIGN_MASK;
+
entry->memdesc.flags = param->flags;
switch (memtype) {
@@ -1872,18 +1987,25 @@
/* Adjust the returned value for a non 4k aligned offset */
param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+ /* echo back flags */
+ param->flags = entry->memdesc.flags;
+
+ result = kgsl_mem_entry_attach_process(entry, private);
+ if (result)
+ goto error_unmap;
KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
kgsl_driver.stats.mapped_max);
kgsl_process_add_stats(private, entry->memtype, param->len);
- kgsl_mem_entry_attach_process(entry, private);
trace_kgsl_mem_map(entry, param->fd);
kgsl_check_idle(dev_priv->device);
return result;
+error_unmap:
+ kgsl_mmu_unmap(private->pagetable, &entry->memdesc);
error_put_file_ptr:
switch (entry->memtype) {
case KGSL_MEM_ENTRY_PMEM:
@@ -1903,33 +2025,133 @@
return result;
}
-/*This function flushes a graphics memory allocation from CPU cache
- *when caching is enabled with MMU*/
+static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry, int op)
+{
+ int ret = 0;
+ int cacheop;
+ int mode;
+
+ /*
+ * Flush is defined as (clean | invalidate). If both bits are set, then
+ * do a flush, otherwise check for the individual bits and clean or inv
+ * as requested
+ */
+
+ if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
+ cacheop = KGSL_CACHE_OP_FLUSH;
+ else if (op & KGSL_GPUMEM_CACHE_CLEAN)
+ cacheop = KGSL_CACHE_OP_CLEAN;
+ else if (op & KGSL_GPUMEM_CACHE_INV)
+ cacheop = KGSL_CACHE_OP_INV;
+ else {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mode = kgsl_memdesc_get_cachemode(&entry->memdesc);
+ if (mode != KGSL_CACHEMODE_UNCACHED
+ && mode != KGSL_CACHEMODE_WRITECOMBINE)
+ kgsl_cache_range_op(&entry->memdesc, cacheop);
+
+done:
+ return ret;
+}
+
+/* New cache sync function - supports both directions (clean and invalidate) */
+
+static long
+kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_gpumem_sync_cache *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ if (param->id != 0) {
+ entry = kgsl_sharedmem_find_id(private, param->id);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
+ param->id);
+ return -EINVAL;
+ }
+ } else if (param->gpuaddr != 0) {
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ spin_unlock(&private->mem_lock);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device,
+ "can't find gpuaddr %x\n",
+ param->gpuaddr);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return _kgsl_gpumem_sync_cache(entry, param->op);
+}
+
+/* Legacy cache function, does a flush (clean + inv) */
+
static long
kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
- int result = 0;
- struct kgsl_mem_entry *entry;
struct kgsl_sharedmem_free *param = data;
struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
spin_lock(&private->mem_lock);
entry = kgsl_sharedmem_find(private, param->gpuaddr);
- if (!entry) {
- KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
- result = -EINVAL;
- goto done;
- }
- if (!entry->memdesc.hostptr) {
- KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
- param->gpuaddr);
- goto done;
+ spin_unlock(&private->mem_lock);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device,
+ "can't find gpuaddr %x\n",
+ param->gpuaddr);
+ return -EINVAL;
}
- kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
-done:
- spin_unlock(&private->mem_lock);
+ return _kgsl_gpumem_sync_cache(entry, KGSL_GPUMEM_CACHE_FLUSH);
+}
+
+/*
+ * The common parts of kgsl_ioctl_gpumem_alloc and kgsl_ioctl_gpumem_alloc_id.
+ */
+int
+_gpumem_alloc(struct kgsl_device_private *dev_priv,
+ struct kgsl_mem_entry **ret_entry,
+ unsigned int size, unsigned int flags)
+{
+ int result;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry;
+
+ /*
+ * Mask off unknown flags from userspace. This way the caller can
+ * check if a flag is supported by looking at the returned flags.
+ */
+ flags &= KGSL_MEMFLAGS_GPUREADONLY
+ | KGSL_CACHEMODE_MASK
+ | KGSL_MEMTYPE_MASK
+ | KGSL_MEMALIGN_MASK;
+
+ entry = kgsl_mem_entry_create();
+ if (entry == NULL)
+ return -ENOMEM;
+
+ result = kgsl_allocate_user(&entry->memdesc, private->pagetable, size,
+ flags);
+ if (result != 0)
+ goto err;
+
+ entry->memtype = KGSL_MEM_ENTRY_KERNEL;
+
+ kgsl_check_idle(dev_priv->device);
+ *ret_entry = entry;
+ return result;
+err:
+ kfree(entry);
+ *ret_entry = NULL;
return result;
}
@@ -1939,29 +2161,111 @@
{
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_gpumem_alloc *param = data;
- struct kgsl_mem_entry *entry;
+ struct kgsl_mem_entry *entry = NULL;
int result;
- entry = kgsl_mem_entry_create();
- if (entry == NULL)
- return -ENOMEM;
+ result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
+ if (result)
+ return result;
- result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
- param->size, param->flags);
+ result = kgsl_mmu_map(private->pagetable, &entry->memdesc,
+ kgsl_memdesc_protflags(&entry->memdesc));
+ if (result)
+ goto err;
- if (result == 0) {
- entry->memtype = KGSL_MEM_ENTRY_KERNEL;
- kgsl_mem_entry_attach_process(entry, private);
- param->gpuaddr = entry->memdesc.gpuaddr;
+ result = kgsl_mem_entry_attach_process(entry, private);
+ if (result != 0)
+ goto err;
- kgsl_process_add_stats(private, entry->memtype, param->size);
- trace_kgsl_mem_alloc(entry);
- } else
- kfree(entry);
+ kgsl_process_add_stats(private, entry->memtype, param->size);
+ trace_kgsl_mem_alloc(entry);
- kgsl_check_idle(dev_priv->device);
+ param->gpuaddr = entry->memdesc.gpuaddr;
+ param->size = entry->memdesc.size;
+ param->flags = entry->memdesc.flags;
+ return result;
+err:
+ kgsl_sharedmem_free(&entry->memdesc);
+ kfree(entry);
return result;
}
+
+static long
+kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_gpumem_alloc_id *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ int result;
+
+ result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
+ if (result != 0)
+ goto err;
+
+ result = kgsl_mmu_map(private->pagetable, &entry->memdesc,
+ kgsl_memdesc_protflags(&entry->memdesc));
+ if (result)
+ goto err;
+
+ result = kgsl_mem_entry_attach_process(entry, private);
+ if (result != 0)
+ goto err;
+
+ kgsl_process_add_stats(private, entry->memtype, param->size);
+ trace_kgsl_mem_alloc(entry);
+
+ param->id = entry->id;
+ param->flags = entry->memdesc.flags;
+ param->size = entry->memdesc.size;
+ param->mmapsize = entry->memdesc.size;
+ param->gpuaddr = entry->memdesc.gpuaddr;
+ return result;
+err:
+ if (entry)
+ kgsl_sharedmem_free(&entry->memdesc);
+ kfree(entry);
+ return result;
+}
+
+static long
+kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_gpumem_get_info *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ int result = 0;
+
+ if (param->id != 0) {
+ entry = kgsl_sharedmem_find_id(private, param->id);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
+ param->id);
+ return -EINVAL;
+ }
+ } else if (param->gpuaddr != 0) {
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ spin_unlock(&private->mem_lock);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device,
+ "can't find gpuaddr %lx\n",
+ param->gpuaddr);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+ param->gpuaddr = entry->memdesc.gpuaddr;
+ param->id = entry->id;
+ param->flags = entry->memdesc.flags;
+ param->size = entry->memdesc.size;
+ param->mmapsize = entry->memdesc.size;
+ param->useraddr = entry->memdesc.useraddr;
+ return result;
+}
+
static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
@@ -2161,6 +2465,14 @@
kgsl_ioctl_timestamp_event, 1),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
kgsl_ioctl_device_setproperty, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
+ kgsl_ioctl_gpumem_alloc_id, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
+ kgsl_ioctl_gpumem_free_id, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
+ kgsl_ioctl_gpumem_get_info, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
+ kgsl_ioctl_gpumem_sync_cache, 0),
};
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
@@ -2330,7 +2642,7 @@
static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
{
- unsigned int ret;
+ unsigned int ret, cache;
unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
struct kgsl_device_private *dev_priv = file->private_data;
struct kgsl_process_private *private = dev_priv->process_priv;
@@ -2343,16 +2655,17 @@
return kgsl_mmap_memstore(device, vma);
/* Find a chunk of GPU memory */
+ entry = kgsl_sharedmem_find_id(private, vma->vm_pgoff);
- spin_lock(&private->mem_lock);
- entry = kgsl_sharedmem_find(private, vma_offset);
+ if (entry == NULL) {
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, vma_offset);
+ spin_unlock(&private->mem_lock);
+ }
if (entry)
kgsl_mem_entry_get(entry);
-
- spin_unlock(&private->mem_lock);
-
- if (entry == NULL)
+ else
return -EINVAL;
if (!entry->memdesc.ops ||
@@ -2375,7 +2688,27 @@
vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
vma->vm_private_data = entry;
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /* Determine user-side caching policy */
+
+ cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
+
+ switch (cache) {
+ case KGSL_CACHEMODE_UNCACHED:
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ case KGSL_CACHEMODE_WRITETHROUGH:
+ vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
+ break;
+ case KGSL_CACHEMODE_WRITEBACK:
+ vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
+ break;
+ case KGSL_CACHEMODE_WRITECOMBINE:
+ default:
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ }
+
vma->vm_ops = &kgsl_gpumem_vm_ops;
vma->vm_file = file;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index d22cb6d..cd1f763 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -185,6 +185,7 @@
int flags;
void *priv_data;
struct rb_node node;
+ unsigned int id;
unsigned int context_id;
/* back pointer to private structure under whose context this
* allocation is made */
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 07a5ff4..d4834e5 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -221,17 +221,28 @@
return '-';
}
+static char get_cacheflag(const struct kgsl_memdesc *m)
+{
+ static const char table[] = {
+ [KGSL_CACHEMODE_WRITECOMBINE] = '-',
+ [KGSL_CACHEMODE_UNCACHED] = 'u',
+ [KGSL_CACHEMODE_WRITEBACK] = 'b',
+ [KGSL_CACHEMODE_WRITETHROUGH] = 't',
+ };
+ return table[kgsl_memdesc_get_cachemode(m)];
+}
+
static int process_mem_print(struct seq_file *s, void *unused)
{
struct kgsl_mem_entry *entry;
struct rb_node *node;
struct kgsl_process_private *private = s->private;
- char flags[4];
+ char flags[5];
char usage[16];
spin_lock(&private->mem_lock);
- seq_printf(s, "%8s %8s %5s %10s %16s %5s\n",
- "gpuaddr", "size", "flags", "type", "usage", "sglen");
+ seq_printf(s, "%8s %8s %5s %5s %10s %16s %5s\n",
+ "gpuaddr", "size", "id", "flags", "type", "usage", "sglen");
for (node = rb_first(&private->mem_rb); node; node = rb_next(node)) {
struct kgsl_memdesc *m;
@@ -241,12 +252,13 @@
flags[0] = kgsl_memdesc_is_global(m) ? 'g' : '-';
flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
flags[2] = get_alignflag(m);
- flags[3] = '\0';
+ flags[3] = get_cacheflag(m);
+ flags[4] = '\0';
kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
- seq_printf(s, "%08x %8d %5s %10s %16s %5d\n",
- m->gpuaddr, m->size, flags,
+ seq_printf(s, "%08x %8d %5d %5s %10s %16s %5d\n",
+ m->gpuaddr, m->size, entry->id, flags,
memtype_str(entry->memtype), usage, m->sglen);
}
spin_unlock(&private->mem_lock);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 35ffc1b..ce3820c 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -268,6 +268,7 @@
pid_t pid;
spinlock_t mem_lock;
struct rb_root mem_rb;
+ struct idr mem_idr;
struct kgsl_pagetable *pagetable;
struct list_head list;
struct kobject kobj;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index be51c11..11ca0b0 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -511,7 +511,14 @@
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
- void *addr = memdesc->hostptr;
+ /*
+ * If the buffer is mapped in the kernel operate on that address
+ * otherwise use the user address
+ */
+
+ void *addr = (memdesc->hostptr) ?
+ memdesc->hostptr : (void *) memdesc->useraddr;
+
int size = memdesc->size;
if (addr != NULL) {
@@ -534,7 +541,7 @@
static int
_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
- size_t size, unsigned int protflags)
+ size_t size)
{
int pcount = 0, order, ret = 0;
int j, len, page_size, sglen_alloc, sglen = 0;
@@ -698,11 +705,6 @@
outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
KGSL_CACHE_OP_FLUSH);
- ret = kgsl_mmu_map(pagetable, memdesc, protflags);
-
- if (ret)
- goto done;
-
KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
kgsl_driver.stats.page_alloc_max);
@@ -729,8 +731,7 @@
size = ALIGN(size, PAGE_SIZE * 2);
- ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
- GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
if (!ret)
ret = kgsl_page_alloc_map_kernel(memdesc);
if (ret)
@@ -744,17 +745,7 @@
struct kgsl_pagetable *pagetable,
size_t size)
{
- unsigned int protflags;
-
- if (size == 0)
- return -EINVAL;
-
- protflags = GSL_PT_PAGE_RV;
- if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
- protflags |= GSL_PT_PAGE_WV;
-
- return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
- protflags);
+ return _kgsl_sharedmem_page_alloc(memdesc, pagetable, PAGE_ALIGN(size));
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
@@ -832,12 +823,6 @@
if (result)
goto err;
- result = kgsl_mmu_map(pagetable, memdesc,
- GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
-
- if (result)
- goto err;
-
KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 53e88be..f07f049 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -83,6 +83,18 @@
}
/*
+ * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
+ * @memdesc: the memdesc
+ *
+ * Returns a KGSL_CACHEMODE* value.
+ */
+static inline int
+kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT;
+}
+
+/*
* kgsl_memdesc_set_align - Set alignment flags of a memdesc
* @memdesc - the memdesc
* @align - alignment requested, as a power of 2 exponent.
@@ -168,14 +180,38 @@
return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
}
+/*
+ * kgsl_memdesc_protflags - get mmu protection flags
+ * @memdesc - the memdesc
+ * Returns a mask of GSL_PT_PAGE* values based on the
+ * memdesc flags.
+ */
+static inline unsigned int
+kgsl_memdesc_protflags(const struct kgsl_memdesc *memdesc)
+{
+ unsigned int protflags = GSL_PT_PAGE_RV;
+ if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
+ protflags |= GSL_PT_PAGE_WV;
+ return protflags;
+}
+
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
+ int ret;
+ memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
- memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
- return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+
+ ret = kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+ if (ret)
+ return ret;
+ ret = kgsl_mmu_map(pagetable, memdesc,
+ kgsl_memdesc_protflags(memdesc));
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+ return ret;
}
static inline int
@@ -185,6 +221,9 @@
{
int ret;
+ if (size == 0)
+ return -EINVAL;
+
memdesc->flags = flags;
if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
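As a worked example of the two helpers above, using the flag constants from the msm_kgsl.h hunk later in this patch, consider a read-only, write-back cached allocation:

/*
 * flags = KGSL_MEMFLAGS_GPUREADONLY
 *       | (KGSL_CACHEMODE_WRITEBACK << KGSL_CACHEMODE_SHIFT)
 *       = 0x01000000 | (3 << 26) = 0x0D000000
 *
 * kgsl_memdesc_get_cachemode() -> 3 (KGSL_CACHEMODE_WRITEBACK), so
 *     _kgsl_gpumem_sync_cache() will actually perform the cache operation.
 * kgsl_memdesc_protflags()     -> GSL_PT_PAGE_RV only, so the GPU page
 *     table entry is created without write permission.
 */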
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 0b247e5..7cc55f6 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -317,6 +317,8 @@
__field(unsigned int, size)
__field(unsigned int, tgid)
__array(char, usage, 16)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
),
TP_fast_assign(
@@ -325,12 +327,14 @@
__entry->tgid = mem_entry->priv->pid;
kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ __entry->flags = mem_entry->memdesc.flags;
),
TP_printk(
- "gpuaddr=0x%08x size=%d tgid=%d usage=%s",
+ "gpuaddr=0x%08x size=%d tgid=%d usage=%s id=%d flags=0x%08x",
__entry->gpuaddr, __entry->size, __entry->tgid,
- __entry->usage
+ __entry->usage, __entry->id, __entry->flags
)
);
@@ -347,6 +351,7 @@
__field(int, type)
__field(unsigned int, tgid)
__array(char, usage, 16)
+ __field(unsigned int, id)
),
TP_fast_assign(
@@ -357,13 +362,14 @@
__entry->tgid = mem_entry->priv->pid;
kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
),
TP_printk(
- "gpuaddr=0x%08x size=%d type=%d fd=%d tgid=%d usage %s",
+ "gpuaddr=0x%08x size=%d type=%d fd=%d tgid=%d usage=%s id=%d",
__entry->gpuaddr, __entry->size,
__entry->type, __entry->fd, __entry->tgid,
- __entry->usage
+ __entry->usage, __entry->id
)
);
@@ -380,6 +386,7 @@
__field(int, fd)
__field(unsigned int, tgid)
__array(char, usage, 16)
+ __field(unsigned int, id)
),
TP_fast_assign(
@@ -389,12 +396,13 @@
__entry->tgid = mem_entry->priv->pid;
kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
),
TP_printk(
- "gpuaddr=0x%08x size=%d type=%d tgid=%d usage=%s",
+ "gpuaddr=0x%08x size=%d type=%d tgid=%d usage=%s id=%d",
__entry->gpuaddr, __entry->size, __entry->type,
- __entry->tgid, __entry->usage
+ __entry->tgid, __entry->usage, __entry->id
)
);
@@ -411,6 +419,7 @@
__field(unsigned int, size)
__field(int, type)
__array(char, usage, 16)
+ __field(unsigned int, id)
__field(unsigned int, drawctxt_id)
__field(unsigned int, curr_ts)
__field(unsigned int, free_ts)
@@ -422,6 +431,7 @@
__entry->size = mem_entry->memdesc.size;
kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
__entry->drawctxt_id = id;
__entry->type = mem_entry->memtype;
__entry->curr_ts = curr_ts;
@@ -429,13 +439,14 @@
),
TP_printk(
- "d_name=%s gpuaddr=0x%08x size=%d type=%d usage=%s ctx=%u"
+ "d_name=%s gpuaddr=0x%08x size=%d type=%d usage=%s id=%d ctx=%u"
" curr_ts=0x%x free_ts=0x%x",
__get_str(device_name),
__entry->gpuaddr,
__entry->size,
__entry->type,
__entry->usage,
+ __entry->id,
__entry->drawctxt_id,
__entry->curr_ts,
__entry->free_ts
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 75cc086..872a9b5 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -12,11 +12,7 @@
config SPS
bool "SPS support"
- depends on (HAS_IOMEM && (ARCH_MSM8960 || ARCH_MSM8X60 \
- || ARCH_APQ8064 || ARCH_MSM9615 \
- || ARCH_MSM9625 || ARCH_MSM8974))
select GENERIC_ALLOCATOR
- default n
help
The SPS (Smart Peripheral Switch) is a DMA engine.
It can move data in the following modes:
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index b193810..2eddb9d 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -55,6 +55,9 @@
#define TEMP_IAVG_STORAGE 0x105
#define TEMP_IAVG_STORAGE_USE_MASK 0x0F
+#define PON_CNTRL_6 0x018
+#define WD_BIT BIT(7)
+
enum pmic_bms_interrupts {
PM8921_BMS_SBI_WRITE_OK,
PM8921_BMS_CC_THR,
@@ -630,17 +633,17 @@
return 0;
}
-#define MBG_TRANSIENT_ERROR_RAW 51
-static void adjust_pon_ocv_raw(struct pm8921_bms_chip *chip,
- struct pm8921_soc_params *raw)
+#define MBG_TRANSIENT_ERROR_UV 15000
+static void adjust_pon_ocv(struct pm8921_bms_chip *chip, int *uv)
{
- /* in 8921 parts the PON ocv is taken when the MBG is not settled.
+ /*
+ * In 8921 parts the PON ocv is taken when the MBG is not settled.
* decrease the pon ocv by 15mV raw value to account for it
* Since a 1/3rd of vbatt is supplied to the adc the raw value
* needs to be adjusted by 5mV worth bits
*/
- if (raw->last_good_ocv_raw >= MBG_TRANSIENT_ERROR_RAW)
- raw->last_good_ocv_raw -= MBG_TRANSIENT_ERROR_RAW;
+ if (*uv >= MBG_TRANSIENT_ERROR_UV)
+ *uv -= MBG_TRANSIENT_ERROR_UV;
}
#define SEL_ALT_OREG_BIT BIT(2)
@@ -663,10 +666,71 @@
return compensated_ocv;
}
+#define RESET_CC_BIT BIT(3)
+static int reset_cc(struct pm8921_bms_chip *chip)
+{
+ int rc;
+
+ rc = pm_bms_masked_write(chip, BMS_TEST1, RESET_CC_BIT, RESET_CC_BIT);
+ if (rc < 0) {
+ pr_err("err setting cc reset rc = %d\n", rc);
+ return rc;
+ }
+
+ /* sleep 100uS for the coulomb counter to reset */
+ udelay(100);
+
+ rc = pm_bms_masked_write(chip, BMS_TEST1, RESET_CC_BIT, 0);
+ if (rc < 0)
+ pr_err("err clearing cc reset rc = %d\n", rc);
+ return rc;
+}
+
+static int estimate_ocv(struct pm8921_bms_chip *chip)
+{
+ int ibat_ua, vbat_uv, ocv_est_uv;
+ int rc;
+
+ int rbatt_mohm = chip->default_rbatt_mohm + chip->rconn_mohm;
+
+ rc = pm8921_bms_get_simultaneous_battery_voltage_and_current(
+ &ibat_ua,
+ &vbat_uv);
+ if (rc) {
+ pr_err("simultaneous failed rc = %d\n", rc);
+ return rc;
+ }
+
+ ocv_est_uv = vbat_uv + (ibat_ua * rbatt_mohm) / 1000;
+ pr_debug("estimated pon ocv = %d\n", ocv_est_uv);
+ return ocv_est_uv;
+}
+
+static bool is_warm_restart(struct pm8921_bms_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ rc = pm8xxx_readb(chip->dev->parent, PON_CNTRL_6, &reg);
+ if (rc) {
+ pr_err("err reading pon 6 rc = %d\n", rc);
+ return false;
+ }
+ return reg & WD_BIT;
+}
+/*
+ * This reflects what the CC readings should be for
+ * a 5mAh discharge. This value is dependent on
+ * CC_RESOLUTION_N, CC_RESOLUTION_D, CC_READING_TICKS
+ * and rsense
+ */
+#define CC_RAW_5MAH 0x00110000
+#define MIN_OCV_UV 2000000
static int read_soc_params_raw(struct pm8921_bms_chip *chip,
struct pm8921_soc_params *raw)
{
int usb_chg;
+ int est_ocv_uv;
mutex_lock(&chip->bms_output_lock);
pm_bms_lock_output_data(chip);
@@ -682,12 +746,40 @@
if (chip->prev_last_good_ocv_raw == 0) {
chip->prev_last_good_ocv_raw = raw->last_good_ocv_raw;
- adjust_pon_ocv_raw(chip, raw);
+
convert_vbatt_raw_to_uv(chip, usb_chg,
raw->last_good_ocv_raw, &raw->last_good_ocv_uv);
+ adjust_pon_ocv(chip, &raw->last_good_ocv_uv);
raw->last_good_ocv_uv = ocv_ir_compensation(chip,
raw->last_good_ocv_uv);
chip->last_ocv_uv = raw->last_good_ocv_uv;
+
+ if (is_warm_restart(chip)
+ || raw->cc > CC_RAW_5MAH
+ || (raw->last_good_ocv_uv < MIN_OCV_UV
+ && raw->cc > 0)) {
+ /*
+ * The CC value is higher than 5mAh.
+ * The phone started without going through a pon
+ * sequence
+ * OR
+ * The ocv was very small and there was no
+ * charging in the bootloader
+ * - reset the CC and take an ocv again
+ */
+ pr_debug("cc_raw = 0x%x may be > 5mAh(0x%x)\n",
+ raw->cc, CC_RAW_5MAH);
+ pr_debug("ocv_uv = %d ocv_raw = 0x%x may be < 2V\n",
+ chip->last_ocv_uv,
+ raw->last_good_ocv_raw);
+ est_ocv_uv = estimate_ocv(chip);
+ if (est_ocv_uv > 0) {
+ raw->last_good_ocv_uv = est_ocv_uv;
+ chip->last_ocv_uv = est_ocv_uv;
+ reset_cc(chip);
+ raw->cc = 0;
+ }
+ }
pr_debug("PON_OCV_UV = %d\n", chip->last_ocv_uv);
} else if (chip->prev_last_good_ocv_raw != raw->last_good_ocv_raw) {
chip->prev_last_good_ocv_raw = raw->last_good_ocv_raw;
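For reference, a worked pass through estimate_ocv() above with illustrative numbers, assuming the driver's convention that a positive ibat_ua is a discharge current:

/*
 * rbatt_mohm = default_rbatt_mohm + rconn_mohm   (say 180 + 20 = 200)
 * ibat_ua    = 300000     -> 300 mA being drawn at boot
 * vbat_uv    = 3600000    -> 3.600 V measured under that load
 *
 * ocv_est_uv = vbat_uv + (ibat_ua * rbatt_mohm) / 1000
 *            = 3600000 + (300000 * 200) / 1000
 *            = 3660000   -> 3.660 V used as the new last_good_ocv_uv
 */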
diff --git a/drivers/power/pm8921-charger.c b/drivers/power/pm8921-charger.c
index 94ef4f9..e39e9d7 100644
--- a/drivers/power/pm8921-charger.c
+++ b/drivers/power/pm8921-charger.c
@@ -2543,15 +2543,25 @@
struct pm8921_chg_chip *chip = container_of(dwork,
struct pm8921_chg_chip, vin_collapse_check_work);
- /* AICL only for wall-chargers */
- if (is_usb_chg_plugged_in(chip) &&
- usb_target_ma > USB_WALL_THRESHOLD_MA) {
+ /*
+ * AICL only for wall-chargers. If the charger appears to be plugged
+ * back in now, the corresponding unplug must have been because of we
+ * back in now, the corresponding unplug must have been because we were
+ * trying to draw more current than the charger can support. In
+ * The AICL algorithm will step up the current from 500mA to target
+ */
+ if (is_usb_chg_plugged_in(chip)
+ && usb_target_ma > USB_WALL_THRESHOLD_MA) {
/* decrease usb_target_ma */
decrease_usb_ma_value(&usb_target_ma);
/* reset here, increase in unplug_check_worker */
__pm8921_charger_vbus_draw(USB_WALL_THRESHOLD_MA);
pr_debug("usb_now=%d, usb_target = %d\n",
USB_WALL_THRESHOLD_MA, usb_target_ma);
+ if (!delayed_work_pending(&chip->unplug_check_work))
+ schedule_delayed_work(&chip->unplug_check_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (UNPLUG_CHECK_WAIT_PERIOD_MS)));
} else {
handle_usb_insertion_removal(chip);
}
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 6912087..ee136cac 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -1,6 +1,13 @@
#ifndef _MSM_KGSL_H
#define _MSM_KGSL_H
+/*
+ * The KGSL version has proven not to be very useful in userspace if features
+ * are cherry-picked into other trees out of order, so it is frozen as of 3.14.
+ * It is left here for backwards compatibility and as a reminder that
+ * software releases are never linear. Also, I like pie.
+ */
+
#define KGSL_VERSION_MAJOR 3
#define KGSL_VERSION_MINOR 14
@@ -16,13 +23,24 @@
#define KGSL_CONTEXT_INVALID 0xffffffff
-/* Memory allocayion flags */
-#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
+/* --- Memory allocation flags --- */
+/* General allocation hints */
+#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
+
+/* Memory caching hints */
+#define KGSL_CACHEMODE_MASK 0x0C000000
+#define KGSL_CACHEMODE_SHIFT 26
+
+#define KGSL_CACHEMODE_WRITECOMBINE 0
+#define KGSL_CACHEMODE_UNCACHED 1
+#define KGSL_CACHEMODE_WRITETHROUGH 2
+#define KGSL_CACHEMODE_WRITEBACK 3
+
+/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK 0x0000FF00
#define KGSL_MEMTYPE_SHIFT 8
-/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_OBJECTANY 0
#define KGSL_MEMTYPE_FRAMEBUFFER 1
#define KGSL_MEMTYPE_RENDERBUFFER 2
@@ -53,7 +71,8 @@
#define KGSL_MEMALIGN_MASK 0x00FF0000
#define KGSL_MEMALIGN_SHIFT 16
-/* generic flag values */
+/* --- generic KGSL flag values --- */
+
#define KGSL_FLAGS_NORMALMODE 0x00000000
#define KGSL_FLAGS_SAFEMODE 0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
@@ -419,6 +438,14 @@
#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
+/*
+ * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_SYNC_CACHE which
+ * supports both directions (flush and invalidate). This code will still
+ * work, but by definition it will do a flush of the cache which might not be
+ * what you want to have happen on a buffer following a GPU operation. It is
+ * safer to go with IOCTL_KGSL_GPUMEM_SYNC_CACHE.
+ */
+
#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
@@ -511,6 +538,113 @@
#define IOCTL_KGSL_TIMESTAMP_EVENT \
_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
+/**
+ * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
+ * @id: returned id value for this allocation.
+ * @flags: mask of KGSL_MEM* values requested and actual flags on return.
+ * @size: requested size of the allocation and actual size on return.
+ * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
+ * @gpuaddr: returned GPU address for the allocation
+ *
+ * Allocate memory for access by the GPU. The flags and size fields are echoed
+ * back by the kernel, so that the caller can know if the request was
+ * adjusted.
+ *
+ * Supported flags:
+ * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
+ * KGSL_MEMTYPE*: usage hint for debugging aid
+ * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
+ */
+struct kgsl_gpumem_alloc_id {
+ unsigned int id;
+ unsigned int flags;
+ unsigned int size;
+ unsigned int mmapsize;
+ unsigned long gpuaddr;
+/* private: reserved for future use*/
+ unsigned int __pad[2];
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
+ _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
+
+/**
+ * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
+ * @id: GPU allocation id to free
+ *
+ * Free an allocation by id, in case a GPU address has not been assigned or
+ * is unknown. Freeing an allocation by id with this ioctl or by GPU address
+ * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
+ */
+struct kgsl_gpumem_free_id {
+ unsigned int id;
+/* private: reserved for future use*/
+ unsigned int __pad;
+};
+
+#define IOCTL_KGSL_GPUMEM_FREE_ID \
+ _IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
+
+/**
+ * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
+ * @gpuaddr: GPU address to query. Also set on return.
+ * @id: GPU allocation id to query. Also set on return.
+ * @flags: returned mask of KGSL_MEM* values.
+ * @size: returned size of the allocation.
+ * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
+ * @useraddr: returned address of the userspace mapping for this buffer
+ *
+ * This ioctl allows querying of all user visible attributes of an existing
+ * allocation, by either the GPU address or the id returned by a previous
+ * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
+ * return all attributes so this ioctl can be used to look them up if needed.
+ *
+ */
+struct kgsl_gpumem_get_info {
+ unsigned long gpuaddr;
+ unsigned int id;
+ unsigned int flags;
+ unsigned int size;
+ unsigned int mmapsize;
+ unsigned long useraddr;
+/* private: reserved for future use*/
+ unsigned int __pad[4];
+};
+
+#define IOCTL_KGSL_GPUMEM_GET_INFO\
+ _IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
+
+/**
+ * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
+ * @gpuaddr: GPU address of the buffer to sync.
+ * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
+ * @op: a mask of KGSL_GPUMEM_CACHE_* values
+ *
+ * Sync the L2 cache for memory headed to and from the GPU - this replaces
+ * IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
+ * directions
+ *
+ */
+struct kgsl_gpumem_sync_cache {
+ unsigned int gpuaddr;
+ unsigned int id;
+ unsigned int op;
+/* private: reserved for future use*/
+ unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
+#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
+
+#define KGSL_GPUMEM_CACHE_INV (1 << 1)
+#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
+
+#define KGSL_GPUMEM_CACHE_FLUSH \
+ (KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
+
+#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
+ _IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
+
#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
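A minimal userspace sketch of the id-based flow, relying only on the structures and ioctls defined above plus the id-as-mmap-offset convention added to kgsl_mmap() earlier in this patch. The device node path is an assumption and error handling is abbreviated; this is not part of the patch itself.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/msm_kgsl.h>

int kgsl_alloc_id_example(void)
{
	struct kgsl_gpumem_alloc_id alloc;
	struct kgsl_gpumem_sync_cache sync;
	struct kgsl_gpumem_free_id free_id;
	void *cpu;
	int fd = open("/dev/kgsl-3d0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return -1;

	memset(&alloc, 0, sizeof(alloc));
	alloc.size = 4096;
	alloc.flags = KGSL_CACHEMODE_WRITEBACK << KGSL_CACHEMODE_SHIFT;
	if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc)) {
		close(fd);
		return -1;
	}

	/* kgsl_mmap() looks the buffer up by the id carried in vm_pgoff */
	cpu = mmap(NULL, alloc.mmapsize, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, (off_t)alloc.id * sysconf(_SC_PAGESIZE));
	if (cpu != MAP_FAILED) {
		memset(cpu, 0, alloc.size);

		/* clean CPU writes out of the cache before the GPU reads */
		memset(&sync, 0, sizeof(sync));
		sync.id = alloc.id;
		sync.op = KGSL_GPUMEM_CACHE_TO_GPU;
		ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);

		munmap(cpu, alloc.mmapsize);
	}

	memset(&free_id, 0, sizeof(free_id));
	free_id.id = alloc.id;
	ioctl(fd, IOCTL_KGSL_GPUMEM_FREE_ID, &free_id);
	close(fd);
	return 0;
}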
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 14ccf3e..3bf514f 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -247,6 +247,11 @@
bdaddr_t bdaddr;
} __packed;
+#define MGMT_OP_CANCEL_RESOLVE_NAME 0x0024
+struct mgmt_cp_cancel_resolve_name {
+ bdaddr_t bdaddr;
+} __packed;
+
#define MGMT_OP_LE_READ_WHITE_LIST_SIZE 0xE000
#define MGMT_OP_LE_CLEAR_WHITE_LIST 0xE001
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 28eb7ea..493801a 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1956,6 +1956,38 @@
return err;
}
+static int cancel_resolve_name(struct sock *sk, u16 index, unsigned char *data,
+ u16 len)
+{
+ struct mgmt_cp_cancel_resolve_name *mgmt_cp = (void *) data;
+ struct hci_cp_remote_name_req_cancel hci_cp;
+ struct hci_dev *hdev;
+ int err;
+
+ BT_DBG("");
+
+ if (len != sizeof(*mgmt_cp))
+ return cmd_status(sk, index, MGMT_OP_CANCEL_RESOLVE_NAME,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_CANCEL_RESOLVE_NAME,
+ ENODEV);
+
+ hci_dev_lock_bh(hdev);
+
+ memset(&hci_cp, 0, sizeof(hci_cp));
+ bacpy(&hci_cp.bdaddr, &mgmt_cp->bdaddr);
+ err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(hci_cp),
+ &hci_cp);
+
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
static int set_connection_params(struct sock *sk, u16 index,
unsigned char *data, u16 len)
{
@@ -2189,10 +2221,11 @@
struct mgmt_mode cp = {0};
int err = -1;
- BT_DBG("");
-
hdev = hci_dev_get(index);
+ if (hdev)
+ BT_DBG("disco_state: %d", hdev->disco_state);
+
if (!hdev || !lmp_le_capable(hdev)) {
mgmt_pending_foreach(MGMT_OP_STOP_DISCOVERY, index,
@@ -2200,6 +2233,8 @@
mgmt_event(MGMT_EV_DISCOVERING, index, &cp, sizeof(cp), NULL);
+ if (hdev)
+ 	hdev->disco_state = SCAN_IDLE;
if (hdev)
goto done;
else
@@ -2315,6 +2350,7 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+ BT_DBG("disco_state: %d", hdev->disco_state);
hci_dev_lock_bh(hdev);
if (hdev->disco_state && timer_pending(&hdev->disco_timer)) {
@@ -2395,6 +2431,8 @@
if (!hdev)
return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+ BT_DBG("disco_state: %d", hdev->disco_state);
+
hci_dev_lock_bh(hdev);
state = hdev->disco_state;
@@ -2659,6 +2697,9 @@
case MGMT_OP_RESOLVE_NAME:
err = resolve_name(sk, index, buf + sizeof(*hdr), len);
break;
+ case MGMT_OP_CANCEL_RESOLVE_NAME:
+ err = cancel_resolve_name(sk, index, buf + sizeof(*hdr), len);
+ break;
case MGMT_OP_SET_CONNECTION_PARAMS:
err = set_connection_params(sk, index, buf + sizeof(*hdr), len);
break;
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index 76cd625..d7e9851 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -1024,6 +1024,9 @@
}
codec_clk = clk_get(cpu_dai->dev, "osr_clk");
err = tabla_hs_detect(codec, &mbhc_cfg);
+ msm_gpiomux_install(
+ msm9615_audio_prim_i2s_codec_configs,
+ ARRAY_SIZE(msm9615_audio_prim_i2s_codec_configs));
return err;
}
diff --git a/sound/soc/msm/qdsp6/q6adm.c b/sound/soc/msm/qdsp6/q6adm.c
index 00394aa..119e017 100644
--- a/sound/soc/msm/qdsp6/q6adm.c
+++ b/sound/soc/msm/qdsp6/q6adm.c
@@ -853,24 +853,15 @@
} else if (channel_mode == 4) {
open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
- open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
+ open.dev_channel_mapping[2] = PCM_CHANNEL_LS;
+ open.dev_channel_mapping[3] = PCM_CHANNEL_RS;
} else if (channel_mode == 6) {
- open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
- open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
- open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
- open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
- } else if (channel_mode == 8) {
- open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
- open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
- open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
- open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
- open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
- open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
- open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
- open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
+ open.dev_channel_mapping[1] = PCM_CHANNEL_FL;
+ open.dev_channel_mapping[2] = PCM_CHANNEL_FR;
+ open.dev_channel_mapping[3] = PCM_CHANNEL_LS;
+ open.dev_channel_mapping[4] = PCM_CHANNEL_RS;
+ open.dev_channel_mapping[5] = PCM_CHANNEL_LFE;
} else {
pr_err("%s invalid num_chan %d\n", __func__,
channel_mode);
diff --git a/sound/soc/msm/qdsp6/q6asm.c b/sound/soc/msm/qdsp6/q6asm.c
index 1dfa605..52e481a 100644
--- a/sound/soc/msm/qdsp6/q6asm.c
+++ b/sound/soc/msm/qdsp6/q6asm.c
@@ -1152,7 +1152,9 @@
enc_cfg.enc_blk.cfg_size = sizeof(struct asm_dts_enc_cfg);
enc_cfg.enc_blk.cfg.dts.sample_rate = sample_rate;
enc_cfg.enc_blk.cfg.dts.num_channels = channels;
- if (channels == 2) {
+ if (channels == 1) {
+ enc_cfg.enc_blk.cfg.dts.channel_mapping[0] = PCM_CHANNEL_FC;
+ } else if (channels == 2) {
enc_cfg.enc_blk.cfg.dts.channel_mapping[0] = PCM_CHANNEL_FL;
enc_cfg.enc_blk.cfg.dts.channel_mapping[1] = PCM_CHANNEL_FR;
enc_cfg.enc_blk.cfg.dts.channel_mapping[2] = 0;
@@ -1167,12 +1169,12 @@
enc_cfg.enc_blk.cfg.dts.channel_mapping[4] = 0;
enc_cfg.enc_blk.cfg.dts.channel_mapping[5] = 0;
} else if (channels == 6) {
- enc_cfg.enc_blk.cfg.dts.channel_mapping[0] = PCM_CHANNEL_FL;
- enc_cfg.enc_blk.cfg.dts.channel_mapping[1] = PCM_CHANNEL_FR;
- enc_cfg.enc_blk.cfg.dts.channel_mapping[2] = PCM_CHANNEL_LFE;
+ enc_cfg.enc_blk.cfg.dts.channel_mapping[0] = PCM_CHANNEL_FC;
+ enc_cfg.enc_blk.cfg.dts.channel_mapping[1] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.dts.channel_mapping[2] = PCM_CHANNEL_FR;
enc_cfg.enc_blk.cfg.dts.channel_mapping[3] = PCM_CHANNEL_LS;
enc_cfg.enc_blk.cfg.dts.channel_mapping[4] = PCM_CHANNEL_RS;
- enc_cfg.enc_blk.cfg.dts.channel_mapping[5] = PCM_CHANNEL_FC;
+ enc_cfg.enc_blk.cfg.dts.channel_mapping[5] = PCM_CHANNEL_LFE;
}
rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
if (rc < 0) {