Merge "platform: msm: qpnp-pwm: Define PWM devicetree bindings" into msm-3.4
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
new file mode 100644
index 0000000..5c6b804
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -0,0 +1,50 @@
+ION Memory Manager (ION)
+
+ION is a memory manager that allows buffers to be shared between different
+processes and between user space and kernel space. ION manages different
+memory spaces by separating them into "heaps". Depending on the type of
+heap, ION must reserve memory using the MSM-specific memory reservation
+bindings (see Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties
+
+- compatible: "qcom,msm-ion"
+- reg: The ID of the ION heap.
+
+Optional properties
+
+- compatible: "qcom,msm-ion-reserve" This is required if memory is to be reserved
+ as specified by qcom,memory-reservation-size below.
+- qcom,heap-align: Alignment of start of the memory in the heap.
+- qcom,heap-adjacent: ID of heap this heap needs to be adjacent to.
+- qcom,memory-reservation-size: size of reserved memory for the ION heap.
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+
+Example:
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ion-heap@30 { /* SYSTEM HEAP */
+ reg = <30>;
+ };
+
+ qcom,ion-heap@8 { /* CP_MM HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <8>;
+ qcom,heap-align = <0x1000>;
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0x7800000>;
+ };
+
+ qcom,ion-heap@29 { /* FIRMWARE HEAP */
+ compatible = "qcom,msm-ion-reserve";
+ reg = <29>;
+ qcom,heap-align = <0x20000>;
+ qcom,heap-adjacent = <8>;
+ qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+ qcom,memory-reservation-size = <0xA00000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index 95ddf34..0516dff 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -49,3 +49,70 @@
qcom,hsusb-otg-pclk-src-name = "dfab_usb_clk";
qcom,hsusb-otg-pmic-id-irq = <47>
};
+
+BAM:
+
+Required properties:
+- compatible: should be "qcom,usb-bam-msm"
+- reg: offset and length of the register set in the memory map
+- interrupts: IRQ line
+- qcom,usb-active-bam: active BAM type. Can be one of
+ 0 - HSUSB_BAM
+ 1 - HSIC_BAM
+- qcom,usb-total-bam-num: total number of BAMs that are supported
+- qcom,usb-bam-num-pipes: max number of pipes that can be used
+- qcom,usb-base-address: physical base address of the BAM
+
+A number of USB BAM pipe parameters are represented as sub-nodes:
+
+Required properties for each sub-node:
+- label: a string describing the pipe's direction and use
+- qcom,usb-bam-type: BAM type. Can be one of
+ 0 - HSUSB_BAM
+ 1 - HSIC_BAM
+- qcom,src-bam-physical-address: source BAM physical address
+- qcom,src-bam-pipe-index: source BAM pipe index
+- qcom,dst-bam-physical-address: destination BAM physical address
+- qcom,dst-bam-pipe-index: destination BAM pipe index
+- qcom,data-fifo-offset: data fifo offset address
+- qcom,data-fifo-size: data fifo size
+- qcom,descriptor-fifo-offset: descriptor fifo offset address
+- qcom,descriptor-fifo-size: descriptor fifo size
+
+Example USB BAM controller device node:
+
+ qcom,usbbam@f9304000 {
+ compatible = "qcom,usb-bam-msm";
+ reg = <0xf9304000 0x9000>;
+ interrupts = <0 132 0>;
+ qcom,usb-active-bam = <0>;
+ qcom,usb-total-bam-num = <1>;
+ qcom,usb-bam-num-pipes = <16>;
+ qcom,usb-base-address = <0xf9200000>;
+
+ qcom,pipe1 {
+ label = "usb-to-peri-qdss-dwc3";
+ qcom,usb-bam-type = <0>;
+ qcom,src-bam-physical-address = <0>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-physical-address = <0>;
+ qcom,dst-bam-pipe-index = <0>;
+ qcom,data-fifo-offset = <0>;
+ qcom,data-fifo-size = <0>;
+ qcom,descriptor-fifo-offset = <0>;
+ qcom,descriptor-fifo-size = <0>;
+ };
+
+ qcom,pipe2 {
+ label = "peri-to-usb-qdss-dwc3";
+ qcom,usb-bam-type = <0>;
+ qcom,src-bam-physical-address = <0xfc37C000>;
+ qcom,src-bam-pipe-index = <0>;
+ qcom,dst-bam-physical-address = <0xf9304000>;
+ qcom,dst-bam-pipe-index = <2>;
+ qcom,data-fifo-offset = <0xf0000>;
+ qcom,data-fifo-size = <0x4000>;
+ qcom,descriptor-fifo-offset = <0xf4000>;
+ qcom,descriptor-fifo-size = <0x1400>;
+ };
+ };
diff --git a/Documentation/usb/misc_ksbridge.txt b/Documentation/usb/misc_ksbridge.txt
new file mode 100644
index 0000000..f409dc1
--- /dev/null
+++ b/Documentation/usb/misc_ksbridge.txt
@@ -0,0 +1,46 @@
+Introduction
+--------------
+ksbridge is a simple misc device that bridges the Kickstart application
+to the HSIC hardware. The driver supports two instances: one for
+flash-less boot/ram dumps and the other for EFS sync.
+
+Initialization
+--------------
+Two bridge instances are created and registered for the USB devices
+0x9008 and 0x9048/0x904C. The misc device name depends on the USB PID:
+for PID 0x9008 the misc device is named ks_bridge, and for PID
+0x9048/0x904C it is named efs_bridge. After Kickstart opens the misc
+device, IN URBs are submitted to the hardware; by default 20 IN URBs
+are queued. A sketch of the registration follows.
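+
+A minimal sketch of the registration (illustrative only; the Qualcomm
+vendor ID 0x05c6 and the names ksb_fops and ksb_misc_dev are
+assumptions, not taken from the driver):
+
+	/* needs <linux/usb.h> and <linux/miscdevice.h> */
+	static const struct usb_device_id ksb_usb_ids[] = {
+		{ USB_DEVICE(0x05c6, 0x9008) }, /* flash-less boot/ram dumps */
+		{ USB_DEVICE(0x05c6, 0x9048) }, /* EFS sync */
+		{ USB_DEVICE(0x05c6, 0x904c) }, /* EFS sync */
+		{ }
+	};
+	MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
+
+	/* one misc device per bridge instance, named from the PID */
+	static struct miscdevice ksb_misc_dev = {
+		.minor	= MISC_DYNAMIC_MINOR,
+		.name	= "ks_bridge",	/* "efs_bridge" for 0x9048/0x904C */
+		.fops	= &ksb_fops,
+	};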
+
+TX PATH
+-------
+The transmit path is simple. The bridge driver exposes a write system
+call to Kickstart. Data from the write call is put on a list, and a
+work item is scheduled to take the data off the list and write it to
+HSIC (see the sketch after the data structures below).
+
+Functions:
+ksb_fs_write: system call invoked when Kickstart writes data
+ksb_tomdm_work: work function that submits data to the HSIC hardware.
+
+Data Structures:
+to_mdm_list: Data is stored in this list
+to_mdm_work: mapped to ksb_tomdm_work function
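+
+A minimal sketch of the write path (illustrative only; locking details,
+wakeups and error handling are trimmed, and the ks_bridge/data_pkt
+fields shown are assumptions):
+
+	/* needs <linux/list.h>, <linux/slab.h>, <linux/uaccess.h>,
+	 * <linux/workqueue.h> and <linux/wait.h>
+	 */
+	struct ks_bridge {			/* per-instance context */
+		spinlock_t		lock;
+		struct list_head	to_mdm_list;	/* pending writes */
+		struct list_head	to_ks_list;	/* completed IN data */
+		wait_queue_head_t	ks_wait_q;
+		struct work_struct	to_mdm_work;	/* ksb_tomdm_work */
+		struct workqueue_struct	*wq;
+	};
+
+	struct data_pkt {
+		struct list_head	list;
+		struct ks_bridge	*ksb;	/* owning bridge instance */
+		size_t			len;
+		char			buf[];
+	};
+
+	static ssize_t ksb_fs_write(struct file *fp, const char __user *ubuf,
+				    size_t count, loff_t *pos)
+	{
+		struct ks_bridge *ksb = fp->private_data;
+		struct data_pkt *pkt;
+		unsigned long flags;
+
+		pkt = kmalloc(sizeof(*pkt) + count, GFP_KERNEL);
+		if (!pkt)
+			return -ENOMEM;
+		if (copy_from_user(pkt->buf, ubuf, count)) {
+			kfree(pkt);
+			return -EFAULT;
+		}
+		pkt->len = count;
+		pkt->ksb = ksb;
+
+		spin_lock_irqsave(&ksb->lock, flags);
+		list_add_tail(&pkt->list, &ksb->to_mdm_list);
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		/* ksb_tomdm_work() dequeues each packet and writes it to HSIC */
+		queue_work(ksb->wq, &ksb->to_mdm_work);
+		return count;
+	}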
+
+RX PATH
+-------
+During initialization, 20 IN URBs are submitted to the HSIC controller.
+In each URB's completion handler, the buffer is dequeued and added to a
+list, and the read function is woken up. A new buffer is then created
+and submitted to the controller (see the sketch after the data
+structures below).
+
+Functions:
+ksb_fs_read: system call invoked by Kickstart when it reads data
+ksb_rx_cb: RX URB completion handler
+ksb_start_rx_work: called during initialization to submit the IN URBs.
+
+Data Structures:
+ks_wait_q: the read system call blocks on this queue until data is
+available or the device is disconnected
+to_ks_list: data is queued to this list by the RX URB completion
+handler and later dequeued by the read system call.
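+
+A minimal sketch of the completion handler, reusing the ks_bridge and
+data_pkt sketches above (illustrative only; error handling and the
+ksb_submit_one_urb() helper are assumptions):
+
+	/* needs <linux/usb.h> in addition to the headers above */
+	static void ksb_rx_cb(struct urb *urb)
+	{
+		struct data_pkt *pkt = urb->context;
+		struct ks_bridge *ksb = pkt->ksb;
+
+		if (urb->status) {
+			kfree(pkt);
+			return;
+		}
+		pkt->len = urb->actual_length;
+
+		/* completion handler runs in interrupt context */
+		spin_lock(&ksb->lock);
+		list_add_tail(&pkt->list, &ksb->to_ks_list);
+		spin_unlock(&ksb->lock);
+
+		/* unblock a reader sleeping in ksb_fs_read() */
+		wake_up(&ksb->ks_wait_q);
+
+		/* allocate and submit a fresh IN buffer */
+		ksb_submit_one_urb(ksb);
+	}
+
+	/* ksb_fs_read() blocks until the completion handler queues data:
+	 *	wait_event_interruptible(ksb->ks_wait_q,
+	 *				 !list_empty(&ksb->to_ks_list));
+	 */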
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index b376544..a187223 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -127,9 +127,9 @@
rpm-regulator-smpb3 {
status = "okay";
pm8841_s3: regulator-s3 {
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,init-voltage = <1150000>;
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ qcom,init-voltage = <1050000>;
status = "okay";
};
};
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index f3a62a5..6f4db42 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -52,6 +52,10 @@
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_OCMEM=y
CONFIG_MSM_MEMORY_DUMP=y
+CONFIG_MSM_CACHE_ERP=y
+CONFIG_MSM_L1_ERR_PANIC=y
+CONFIG_MSM_L2_ERP_PRINT_ACCESS_ERRORS=y
+CONFIG_MSM_L2_ERP_2BIT_PANIC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
@@ -160,6 +164,7 @@
# CONFIG_RADIO_ADAPTERS is not set
CONFIG_ION=y
CONFIG_ION_MSM=y
+CONFIG_MSM_KGSL=y
CONFIG_FB=y
CONFIG_FB_MSM=y
# CONFIG_FB_MSM_BACKLIGHT is not set
@@ -239,8 +244,6 @@
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 94aa75e..bc81696 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,9 +37,11 @@
#endif
/*
- * The fixup involves disabling interrupts during execution of the WFE
- * instruction. This could potentially lead to deadlock if a thread is trying
- * to acquire a spinlock which is being released from an interrupt context.
+ * The fixup involves disabling FIQs during execution of the WFE instruction.
+ * This could potentially lead to deadlock if a thread is trying to acquire a
+ * spinlock which is being released from an FIQ. This should not be a problem
+ * because FIQs are handled by the secure environment and do not directly
+ * manipulate spinlocks.
*/
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
#define WFE_SAFE(fixup, tmp) \
@@ -47,7 +49,7 @@
" cmp " fixup ", #0\n" \
" wfeeq\n" \
" beq 10f\n" \
-" cpsid if\n" \
+" cpsid f\n" \
" mrc p15, 7, " fixup ", c15, c0, 5\n" \
" bic " fixup ", " fixup ", #0x10000\n" \
" mcr p15, 7, " fixup ", c15, c0, 5\n" \
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7c44acd..7a8c2d6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -966,7 +966,7 @@
* SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler.
*/
- .macro vector_stub, name, mode, correction=0
+ .macro vector_stub, name, mode, fixup, correction=0
.align 5
vector_\name:
@@ -995,6 +995,18 @@
and lr, lr, #0x0f
THUMB( adr r0, 1f )
THUMB( ldr lr, [r0, lr, lsl #2] )
+ .if \fixup
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+ ldr r0, .krait_fixup
+ ldr r0, [r0]
+ cmp r0, #0
+ beq 10f
+ mrc p15, 7, r0, c15, c0, 5
+ orr r0, r0, #0x10000
+ mcr p15, 7, r0, c15, c0, 5
+10: isb
+#endif
+ .endif
mov r0, sp
ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
@@ -1010,7 +1022,7 @@
/*
* Interrupt dispatcher
*/
- vector_stub irq, IRQ_MODE, 4
+ vector_stub irq, IRQ_MODE, 1, 4
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1033,7 +1045,7 @@
* Data abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
- vector_stub dabt, ABT_MODE, 8
+ vector_stub dabt, ABT_MODE, 0, 8
.long __dabt_usr @ 0 (USR_26 / USR_32)
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1056,7 +1068,7 @@
* Prefetch abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
- vector_stub pabt, ABT_MODE, 4
+ vector_stub pabt, ABT_MODE, 0, 4
.long __pabt_usr @ 0 (USR_26 / USR_32)
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1079,7 +1091,7 @@
* Undef instr entry dispatcher
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*/
- vector_stub und, UND_MODE
+ vector_stub und, UND_MODE, 0
.long __und_usr @ 0 (USR_26 / USR_32)
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1131,6 +1143,8 @@
.LCvswi:
.word vector_swi
+.krait_fixup:
+ .word msm_krait_need_wfe_fixup
.globl __stubs_end
__stubs_end:
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 33b153f..b6ce351 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -251,7 +251,7 @@
obj-$(CONFIG_MACH_MSM7627A_QRD1) += board-qrd7627a.o board-7627a-all.o
obj-$(CONFIG_MACH_MSM7627A_QRD3) += board-qrd7627a.o board-7627a-all.o
obj-$(CONFIG_MACH_MSM7627A_EVB) += board-qrd7627a.o board-7627a-all.o
-obj-$(CONFIG_ARCH_MSM8625) += devices-msm7x27a.o clock-pcom-lookup.o mpm-8625.o
+obj-$(CONFIG_ARCH_MSM8625) += msm_smem_iface.o devices-msm7x27a.o clock-pcom-lookup.o mpm-8625.o
obj-$(CONFIG_MACH_MSM8625_RUMI3) += board-msm7x27a.o
obj-$(CONFIG_MACH_MSM8625_SURF) += board-msm7x27a.o board-7627a-all.o
obj-$(CONFIG_MACH_MSM8625_EVB) += board-qrd7627a.o board-7627a-all.o
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 8c89014..22275b4 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -23,7 +23,7 @@
#include "acpuclock-krait.h"
/* Corner type vreg VDD values */
-#define LVL_NONE RPM_REGULATOR_CORNER_RETENTION
+#define LVL_NONE RPM_REGULATOR_CORNER_NONE
#define LVL_LOW RPM_REGULATOR_CORNER_SVS_SOC
#define LVL_NOM RPM_REGULATOR_CORNER_NORMAL
#define LVL_HIGH RPM_REGULATOR_CORNER_SUPER_TURBO
@@ -94,11 +94,10 @@
};
static struct msm_bus_paths bw_level_tbl[] __initdata = {
- [0] = BW_MBPS(400), /* At least 50 MHz on bus. */
- [1] = BW_MBPS(800), /* At least 100 MHz on bus. */
- [2] = BW_MBPS(1334), /* At least 167 MHz on bus. */
- [3] = BW_MBPS(2666), /* At least 200 MHz on bus. */
- [4] = BW_MBPS(3200), /* At least 333 MHz on bus. */
+ [0] = BW_MBPS(552), /* At least 69 MHz on bus. */
+ [1] = BW_MBPS(1112), /* At least 139 MHz on bus. */
+ [2] = BW_MBPS(2224), /* At least 278 MHz on bus. */
+ [3] = BW_MBPS(4448), /* At least 556 MHz on bus. */
};
static struct msm_bus_scale_pdata bus_scale_data __initdata = {
@@ -109,31 +108,59 @@
};
static struct l2_level l2_freq_tbl[] __initdata = {
- [0] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 1050000, 2 },
- [1] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 1050000, 2 },
- [2] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 1050000, 2 },
- [3] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 1050000, 2 },
- [4] = { { 576000, HFPLL, 1, 0, 30 }, LVL_NOM, 1050000, 3 },
- [5] = { { 652800, HFPLL, 1, 0, 34 }, LVL_NOM, 1050000, 3 },
- [6] = { { 729600, HFPLL, 1, 0, 38 }, LVL_NOM, 1050000, 3 },
- [7] = { { 806400, HFPLL, 1, 0, 42 }, LVL_NOM, 1050000, 3 },
- [8] = { { 883200, HFPLL, 1, 0, 46 }, LVL_NOM, 1050000, 4 },
- [9] = { { 960000, HFPLL, 1, 0, 50 }, LVL_NOM, 1050000, 4 },
- [10] = { { 1036800, HFPLL, 1, 0, 54 }, LVL_NOM, 1050000, 4 },
+ [0] = { { 300000, PLL_0, 0, 2, 0 }, LVL_LOW, 950000, 0 },
+ [1] = { { 384000, HFPLL, 2, 0, 40 }, LVL_NOM, 950000, 1 },
+ [2] = { { 460800, HFPLL, 2, 0, 48 }, LVL_NOM, 950000, 1 },
+ [3] = { { 537600, HFPLL, 1, 0, 28 }, LVL_NOM, 950000, 2 },
+ [4] = { { 576000, HFPLL, 1, 0, 30 }, LVL_NOM, 950000, 2 },
+ [5] = { { 652800, HFPLL, 1, 0, 34 }, LVL_NOM, 950000, 2 },
+ [6] = { { 729600, HFPLL, 1, 0, 38 }, LVL_NOM, 950000, 2 },
+ [7] = { { 806400, HFPLL, 1, 0, 42 }, LVL_NOM, 950000, 2 },
+ [8] = { { 883200, HFPLL, 1, 0, 46 }, LVL_HIGH, 1050000, 2 },
+ [9] = { { 960000, HFPLL, 1, 0, 50 }, LVL_HIGH, 1050000, 2 },
+ [10] = { { 1036800, HFPLL, 1, 0, 54 }, LVL_HIGH, 1050000, 3 },
+ [11] = { { 1113600, HFPLL, 1, 0, 58 }, LVL_HIGH, 1050000, 3 },
+ [12] = { { 1190400, HFPLL, 1, 0, 62 }, LVL_HIGH, 1050000, 3 },
+ [13] = { { 1267200, HFPLL, 1, 0, 66 }, LVL_HIGH, 1050000, 3 },
+ [14] = { { 1344000, HFPLL, 1, 0, 70 }, LVL_HIGH, 1050000, 3 },
+ [15] = { { 1420800, HFPLL, 1, 0, 74 }, LVL_HIGH, 1050000, 3 },
+ [16] = { { 1497600, HFPLL, 1, 0, 78 }, LVL_HIGH, 1050000, 3 },
+ [17] = { { 1574400, HFPLL, 1, 0, 82 }, LVL_HIGH, 1050000, 3 },
+ [18] = { { 1651200, HFPLL, 1, 0, 86 }, LVL_HIGH, 1050000, 3 },
+ [19] = { { 1728000, HFPLL, 1, 0, 90 }, LVL_HIGH, 1050000, 3 },
+ [20] = { { 1804800, HFPLL, 1, 0, 94 }, LVL_HIGH, 1050000, 3 },
+ [21] = { { 1881600, HFPLL, 1, 0, 98 }, LVL_HIGH, 1050000, 3 },
+ [22] = { { 1958400, HFPLL, 1, 0, 102 }, LVL_HIGH, 1050000, 3 },
+ [23] = { { 2035200, HFPLL, 1, 0, 106 }, LVL_HIGH, 1050000, 3 },
+ [24] = { { 2112000, HFPLL, 1, 0, 110 }, LVL_HIGH, 1050000, 3 },
+ [25] = { { 2188800, HFPLL, 1, 0, 114 }, LVL_HIGH, 1050000, 3 },
};
static struct acpu_level acpu_freq_tbl[] __initdata = {
- { 1, { 300000, PLL_0, 0, 2, 0 }, L2(0), 1050000, 3200000 },
- { 1, { 384000, HFPLL, 2, 0, 40 }, L2(1), 1050000, 3200000 },
- { 1, { 460800, HFPLL, 2, 0, 48 }, L2(2), 1050000, 3200000 },
- { 1, { 537600, HFPLL, 1, 0, 28 }, L2(3), 1050000, 3200000 },
- { 1, { 576000, HFPLL, 1, 0, 30 }, L2(4), 1050000, 3200000 },
- { 1, { 652800, HFPLL, 1, 0, 34 }, L2(5), 1050000, 3200000 },
- { 1, { 729600, HFPLL, 1, 0, 38 }, L2(6), 1050000, 3200000 },
- { 1, { 806400, HFPLL, 1, 0, 42 }, L2(7), 1050000, 3200000 },
- { 1, { 883200, HFPLL, 1, 0, 46 }, L2(8), 1050000, 3200000 },
- { 1, { 960000, HFPLL, 1, 0, 50 }, L2(9), 1050000, 3200000 },
- { 1, { 1036800, HFPLL, 1, 0, 54 }, L2(10), 1050000, 3200000 },
+ { 1, { 300000, PLL_0, 0, 2, 0 }, L2(0), 950000, 3200000 },
+ { 1, { 384000, HFPLL, 2, 0, 40 }, L2(3), 950000, 3200000 },
+ { 1, { 460800, HFPLL, 2, 0, 48 }, L2(3), 950000, 3200000 },
+ { 1, { 537600, HFPLL, 1, 0, 28 }, L2(5), 950000, 3200000 },
+ { 1, { 576000, HFPLL, 1, 0, 30 }, L2(5), 950000, 3200000 },
+ { 1, { 652800, HFPLL, 1, 0, 34 }, L2(5), 950000, 3200000 },
+ { 1, { 729600, HFPLL, 1, 0, 38 }, L2(5), 950000, 3200000 },
+ { 1, { 806400, HFPLL, 1, 0, 42 }, L2(7), 950000, 3200000 },
+ { 1, { 883200, HFPLL, 1, 0, 46 }, L2(7), 950000, 3200000 },
+ { 1, { 960000, HFPLL, 1, 0, 50 }, L2(7), 950000, 3200000 },
+ { 1, { 1036800, HFPLL, 1, 0, 54 }, L2(7), 950000, 3200000 },
+ { 0, { 1113600, HFPLL, 1, 0, 58 }, L2(12), 1050000, 3200000 },
+ { 0, { 1190400, HFPLL, 1, 0, 62 }, L2(12), 1050000, 3200000 },
+ { 0, { 1267200, HFPLL, 1, 0, 66 }, L2(12), 1050000, 3200000 },
+ { 0, { 1344000, HFPLL, 1, 0, 70 }, L2(15), 1050000, 3200000 },
+ { 0, { 1420800, HFPLL, 1, 0, 74 }, L2(15), 1050000, 3200000 },
+ { 0, { 1497600, HFPLL, 1, 0, 78 }, L2(15), 1050000, 3200000 },
+ { 0, { 1574400, HFPLL, 1, 0, 82 }, L2(20), 1050000, 3200000 },
+ { 0, { 1651200, HFPLL, 1, 0, 86 }, L2(20), 1050000, 3200000 },
+ { 0, { 1728000, HFPLL, 1, 0, 90 }, L2(20), 1050000, 3200000 },
+ { 0, { 1804800, HFPLL, 1, 0, 94 }, L2(25), 1050000, 3200000 },
+ { 0, { 1881600, HFPLL, 1, 0, 98 }, L2(25), 1050000, 3200000 },
+ { 0, { 1958400, HFPLL, 1, 0, 102 }, L2(25), 1050000, 3200000 },
+ { 0, { 1996800, HFPLL, 1, 0, 104 }, L2(25), 1050000, 3200000 },
{ 0, { 0 } }
};
diff --git a/arch/arm/mach-msm/board-8064-gpiomux.c b/arch/arm/mach-msm/board-8064-gpiomux.c
index 1c19442..7898cf6 100644
--- a/arch/arm/mach-msm/board-8064-gpiomux.c
+++ b/arch/arm/mach-msm/board-8064-gpiomux.c
@@ -788,7 +788,7 @@
static struct gpiomux_setting mdm2ap_status_cfg = {
.func = GPIOMUX_FUNC_GPIO,
.drv = GPIOMUX_DRV_8MA,
- .pull = GPIOMUX_PULL_NONE,
+ .pull = GPIOMUX_PULL_DOWN,
};
static struct gpiomux_setting mdm2ap_errfatal_cfg = {
@@ -828,6 +828,7 @@
{
.gpio = 49,
.settings = {
+ [GPIOMUX_ACTIVE] = &mdm2ap_status_cfg,
[GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg,
}
},
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 879434d..e77e7c0 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -141,6 +141,12 @@
PM8921_GPIO_INPUT(17, PM_GPIO_PULL_UP_1P5), /* SD_WP */
};
+static struct pm8xxx_gpio_init pm8921_mpq_gpios[] __initdata = {
+ PM8921_GPIO_INIT(27, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0,
+ PM_GPIO_PULL_NO, PM_GPIO_VIN_VPH, PM_GPIO_STRENGTH_NO,
+ PM_GPIO_FUNC_NORMAL, 0, 0),
+};
+
/* Initial PM8XXX MPP configurations */
static struct pm8xxx_mpp_init pm8xxx_mpps[] __initdata = {
PM8921_MPP_INIT(3, D_OUTPUT, PM8921_MPP_DIG_LEVEL_VPH, DOUT_CTRL_LOW),
@@ -186,6 +192,18 @@
}
}
+ if (machine_is_mpq8064_cdp() || machine_is_mpq8064_hrd()
+ || machine_is_mpq8064_dtv())
+ for (i = 0; i < ARRAY_SIZE(pm8921_mpq_gpios); i++) {
+ rc = pm8xxx_gpio_config(pm8921_mpq_gpios[i].gpio,
+ &pm8921_mpq_gpios[i].config);
+ if (rc) {
+ pr_err("%s: pm8xxx_gpio_config: rc=%d\n",
+ __func__, rc);
+ break;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(pm8xxx_mpps); i++) {
rc = pm8xxx_mpp_config(pm8xxx_mpps[i].mpp,
&pm8xxx_mpps[i].config);
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 90563ad..d6217022 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -888,6 +888,8 @@
-1
};
+#define PMIC_GPIO_DP 27 /* PMIC GPIO for D+ change */
+#define PMIC_GPIO_DP_IRQ PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PMIC_GPIO_DP)
static struct msm_otg_platform_data msm_otg_pdata = {
.mode = USB_OTG,
.otg_control = OTG_PMIC_CONTROL,
@@ -913,6 +915,9 @@
if (machine_is_apq8064_liquid())
msm_ehci_host_pdata3.dock_connect_irq =
PM8921_MPP_IRQ(PM8921_IRQ_BASE, 9);
+ else
+ msm_ehci_host_pdata3.pmic_gpio_dp_irq =
+ PMIC_GPIO_DP_IRQ;
apq8064_device_ehci_host3.dev.platform_data =
&msm_ehci_host_pdata3;
@@ -1727,6 +1732,12 @@
.mdm2ap_vddmin_gpio = 80,
};
+static struct gpiomux_setting mdm2ap_status_gpio_run_cfg = {
+ .func = GPIOMUX_FUNC_GPIO,
+ .drv = GPIOMUX_DRV_8MA,
+ .pull = GPIOMUX_PULL_NONE,
+};
+
static struct mdm_platform_data mdm_platform_data = {
.mdm_version = "3.0",
.ramdump_delay_ms = 2000,
@@ -1735,6 +1746,7 @@
.vddmin_resource = &mdm_vddmin_rscs,
.peripheral_platform_device = &apq8064_device_hsic_host,
.ramdump_timeout_ms = 120000,
+ .mdm2ap_status_gpio_run_cfg = &mdm2ap_status_gpio_run_cfg,
};
static struct tsens_platform_data apq_tsens_pdata = {
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 02b28b6..1827773 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -3200,12 +3200,36 @@
F_END
};
+static struct branch_clk dsi1_reset_clk = {
+ .b = {
+ .reset_reg = SW_RESET_CORE_REG,
+ .reset_mask = BIT(7),
+ .halt_check = NOCHECK,
+ },
+ .c = {
+ .dbg_name = "dsi1_reset_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(dsi1_reset_clk.c),
+ },
+};
+
+static struct branch_clk dsi2_reset_clk = {
+ .b = {
+ .reset_reg = SW_RESET_CORE_REG,
+ .reset_mask = BIT(25),
+ .halt_check = NOCHECK,
+ },
+ .c = {
+ .dbg_name = "dsi2_reset_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(dsi2_reset_clk.c),
+ },
+};
+
static struct rcg_clk dsi1_byte_clk = {
.b = {
.ctl_reg = DSI1_BYTE_CC_REG,
.en_mask = BIT(0),
- .reset_reg = SW_RESET_CORE_REG,
- .reset_mask = BIT(7),
.halt_reg = DBG_BUS_VEC_B_REG,
.halt_bit = 21,
.retain_reg = DSI1_BYTE_CC_REG,
@@ -3228,8 +3252,6 @@
.b = {
.ctl_reg = DSI2_BYTE_CC_REG,
.en_mask = BIT(0),
- .reset_reg = SW_RESET_CORE_REG,
- .reset_mask = BIT(25),
.halt_reg = DBG_BUS_VEC_B_REG,
.halt_bit = 20,
.retain_reg = DSI2_BYTE_CC_REG,
@@ -3252,7 +3274,6 @@
.b = {
.ctl_reg = DSI1_ESC_CC_REG,
.en_mask = BIT(0),
- .reset_reg = SW_RESET_CORE_REG,
.halt_reg = DBG_BUS_VEC_I_REG,
.halt_bit = 1,
},
@@ -5435,6 +5456,9 @@
CLK_LOOKUP("mem_clk", ebi1_acpu_a_clk.c, ""),
CLK_LOOKUP("bus_clk", afab_acpu_a_clk.c, ""),
+ CLK_LOOKUP("reset1_clk", dsi1_reset_clk.c, "footswitch-8x60.4"),
+ CLK_LOOKUP("reset2_clk", dsi2_reset_clk.c, "footswitch-8x60.4"),
+
CLK_LOOKUP("l2_mclk", l2_m_clk, ""),
CLK_LOOKUP("krait0_mclk", krait0_m_clk, ""),
CLK_LOOKUP("krait1_mclk", krait1_m_clk, ""),
@@ -5752,6 +5776,9 @@
CLK_LOOKUP("mem_clk", ebi1_acpu_a_clk.c, ""),
CLK_LOOKUP("bus_clk", afab_acpu_a_clk.c, ""),
+ CLK_LOOKUP("reset1_clk", dsi1_reset_clk.c, "footswitch-8x60.4"),
+ CLK_LOOKUP("reset2_clk", dsi2_reset_clk.c, "footswitch-8x60.4"),
+
CLK_LOOKUP("l2_mclk", l2_m_clk, ""),
CLK_LOOKUP("krait0_mclk", krait0_m_clk, ""),
CLK_LOOKUP("krait1_mclk", krait1_m_clk, ""),
@@ -6077,6 +6104,8 @@
CLK_LOOKUP("mem_clk", ebi1_acpu_a_clk.c, ""),
CLK_LOOKUP("bus_clk", afab_acpu_a_clk.c, ""),
+ CLK_LOOKUP("reset1_clk", dsi1_reset_clk.c, "footswitch-8x60.4"),
+
CLK_LOOKUP("l2_mclk", l2_m_clk, ""),
CLK_LOOKUP("krait0_mclk", krait0_m_clk, ""),
CLK_LOOKUP("krait1_mclk", krait1_m_clk, ""),
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 59d53bb..658b3dc 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -4356,72 +4356,76 @@
};
struct measure_mux_entry measure_mux[] = {
- {&gcc_bam_dma_ahb_clk.c, GCC_BASE, 0x00e8},
- {&gcc_blsp1_ahb_clk.c, GCC_BASE, 0x0090},
- {&gcc_blsp1_qup1_i2c_apps_clk.c, GCC_BASE, 0x0093},
- {&gcc_blsp1_qup1_spi_apps_clk.c, GCC_BASE, 0x0092},
- {&gcc_blsp1_qup2_i2c_apps_clk.c, GCC_BASE, 0x0098},
- {&gcc_blsp1_qup2_spi_apps_clk.c, GCC_BASE, 0x0096},
- {&gcc_blsp1_qup3_i2c_apps_clk.c, GCC_BASE, 0x009c},
- {&gcc_blsp1_qup3_spi_apps_clk.c, GCC_BASE, 0x009b},
- {&gcc_blsp1_qup4_i2c_apps_clk.c, GCC_BASE, 0x00a1},
- {&gcc_blsp1_qup4_spi_apps_clk.c, GCC_BASE, 0x00a0},
- {&gcc_blsp1_qup5_i2c_apps_clk.c, GCC_BASE, 0x00a5},
- {&gcc_blsp1_qup5_spi_apps_clk.c, GCC_BASE, 0x00a4},
- {&gcc_blsp1_qup6_i2c_apps_clk.c, GCC_BASE, 0x00aa},
- {&gcc_blsp1_qup6_spi_apps_clk.c, GCC_BASE, 0x00a9},
- {&gcc_blsp1_uart1_apps_clk.c, GCC_BASE, 0x0094},
- {&gcc_blsp1_uart2_apps_clk.c, GCC_BASE, 0x0099},
- {&gcc_blsp1_uart3_apps_clk.c, GCC_BASE, 0x009d},
- {&gcc_blsp1_uart4_apps_clk.c, GCC_BASE, 0x00a2},
- {&gcc_blsp1_uart5_apps_clk.c, GCC_BASE, 0x00a6},
- {&gcc_blsp1_uart6_apps_clk.c, GCC_BASE, 0x00ab},
- {&gcc_blsp2_ahb_clk.c, GCC_BASE, 0x00b0},
- {&gcc_blsp2_qup1_i2c_apps_clk.c, GCC_BASE, 0x00b3},
- {&gcc_blsp2_qup1_spi_apps_clk.c, GCC_BASE, 0x00b2},
- {&gcc_blsp2_qup2_i2c_apps_clk.c, GCC_BASE, 0x00b8},
- {&gcc_blsp2_qup2_spi_apps_clk.c, GCC_BASE, 0x00b6},
- {&gcc_blsp2_qup3_i2c_apps_clk.c, GCC_BASE, 0x00bc},
- {&gcc_blsp2_qup3_spi_apps_clk.c, GCC_BASE, 0x00bb},
- {&gcc_blsp2_qup4_i2c_apps_clk.c, GCC_BASE, 0x00c1},
- {&gcc_blsp2_qup4_spi_apps_clk.c, GCC_BASE, 0x00c0},
- {&gcc_blsp2_qup5_i2c_apps_clk.c, GCC_BASE, 0x00c5},
- {&gcc_blsp2_qup5_spi_apps_clk.c, GCC_BASE, 0x00c4},
- {&gcc_blsp2_qup6_i2c_apps_clk.c, GCC_BASE, 0x00ca},
- {&gcc_blsp2_qup6_spi_apps_clk.c, GCC_BASE, 0x00c9},
- {&gcc_blsp2_uart1_apps_clk.c, GCC_BASE, 0x00b4},
- {&gcc_blsp2_uart2_apps_clk.c, GCC_BASE, 0x00b9},
- {&gcc_blsp2_uart3_apps_clk.c, GCC_BASE, 0x00bd},
- {&gcc_blsp2_uart4_apps_clk.c, GCC_BASE, 0x00c2},
- {&gcc_blsp2_uart5_apps_clk.c, GCC_BASE, 0x00c6},
- {&gcc_blsp2_uart6_apps_clk.c, GCC_BASE, 0x00cb},
- {&gcc_boot_rom_ahb_clk.c, GCC_BASE, 0x0100},
- {&gcc_ocmem_noc_cfg_ahb_clk.c, GCC_BASE, 0x0029},
- {&gcc_mmss_noc_cfg_ahb_clk.c, GCC_BASE, 0x002A},
- {&gcc_mss_cfg_ahb_clk.c, GCC_BASE, 0x0030},
- {&gcc_ce1_clk.c, GCC_BASE, 0x0140},
- {&gcc_ce2_clk.c, GCC_BASE, 0x0148},
- {&gcc_pdm2_clk.c, GCC_BASE, 0x00da},
- {&gcc_pdm_ahb_clk.c, GCC_BASE, 0x00d8},
- {&gcc_prng_ahb_clk.c, GCC_BASE, 0x00e0},
- {&gcc_sdcc1_ahb_clk.c, GCC_BASE, 0x0071},
- {&gcc_sdcc1_apps_clk.c, GCC_BASE, 0x0070},
- {&gcc_sdcc2_ahb_clk.c, GCC_BASE, 0x0079},
- {&gcc_sdcc2_apps_clk.c, GCC_BASE, 0x0078},
- {&gcc_sdcc3_ahb_clk.c, GCC_BASE, 0x0081},
- {&gcc_sdcc3_apps_clk.c, GCC_BASE, 0x0080},
- {&gcc_sdcc4_ahb_clk.c, GCC_BASE, 0x0089},
- {&gcc_sdcc4_apps_clk.c, GCC_BASE, 0x0088},
- {&gcc_tsif_ahb_clk.c, GCC_BASE, 0x00f0},
- {&gcc_tsif_ref_clk.c, GCC_BASE, 0x00f1},
+ {&gcc_pdm_ahb_clk.c, GCC_BASE, 0x00d0},
+ {&gcc_blsp2_qup1_i2c_apps_clk.c, GCC_BASE, 0x00ab},
+ {&gcc_blsp2_qup3_spi_apps_clk.c, GCC_BASE, 0x00b3},
+ {&gcc_blsp2_uart5_apps_clk.c, GCC_BASE, 0x00be},
{&gcc_usb30_master_clk.c, GCC_BASE, 0x0050},
+ {&gcc_blsp2_qup3_i2c_apps_clk.c, GCC_BASE, 0x00b4},
+ {&gcc_usb_hsic_system_clk.c, GCC_BASE, 0x0059},
+ {&gcc_blsp2_uart3_apps_clk.c, GCC_BASE, 0x00b5},
+ {&gcc_usb_hsic_io_cal_clk.c, GCC_BASE, 0x005b},
+ {&gcc_ce2_axi_clk.c, GCC_BASE, 0x0141},
+ {&gcc_sdcc3_ahb_clk.c, GCC_BASE, 0x0079},
+ {&gcc_blsp1_qup5_i2c_apps_clk.c, GCC_BASE, 0x009d},
+ {&gcc_blsp1_qup1_spi_apps_clk.c, GCC_BASE, 0x008a},
+ {&gcc_blsp2_uart4_apps_clk.c, GCC_BASE, 0x00ba},
+ {&gcc_ce2_clk.c, GCC_BASE, 0x0140},
+ {&gcc_blsp1_uart2_apps_clk.c, GCC_BASE, 0x0091},
+ {&gcc_sdcc1_ahb_clk.c, GCC_BASE, 0x0069},
+ {&gcc_mss_cfg_ahb_clk.c, GCC_BASE, 0x0030},
+ {&gcc_tsif_ahb_clk.c, GCC_BASE, 0x00e8},
+ {&gcc_sdcc4_ahb_clk.c, GCC_BASE, 0x0081},
+ {&gcc_blsp1_qup4_spi_apps_clk.c, GCC_BASE, 0x0098},
+ {&gcc_blsp2_qup4_spi_apps_clk.c, GCC_BASE, 0x00b8},
+ {&gcc_blsp1_qup3_spi_apps_clk.c, GCC_BASE, 0x0093},
+ {&gcc_blsp1_qup6_i2c_apps_clk.c, GCC_BASE, 0x00a2},
+ {&gcc_blsp2_qup6_i2c_apps_clk.c, GCC_BASE, 0x00c2},
+ {&gcc_bam_dma_ahb_clk.c, GCC_BASE, 0x00e0},
+ {&gcc_sdcc3_apps_clk.c, GCC_BASE, 0x0078},
+ {&gcc_usb_hs_system_clk.c, GCC_BASE, 0x0060},
+ {&gcc_blsp1_ahb_clk.c, GCC_BASE, 0x0088},
+ {&gcc_sdcc1_apps_clk.c, GCC_BASE, 0x0068},
+ {&gcc_blsp2_qup5_i2c_apps_clk.c, GCC_BASE, 0x00bd},
+ {&gcc_blsp1_uart4_apps_clk.c, GCC_BASE, 0x009a},
+ {&gcc_blsp2_qup2_spi_apps_clk.c, GCC_BASE, 0x00ae},
+ {&gcc_blsp2_qup6_spi_apps_clk.c, GCC_BASE, 0x00c1},
+ {&gcc_blsp2_uart2_apps_clk.c, GCC_BASE, 0x00b1},
+ {&gcc_blsp1_qup2_spi_apps_clk.c, GCC_BASE, 0x008e},
+ {&gcc_usb_hsic_ahb_clk.c, GCC_BASE, 0x0058},
+ {&gcc_blsp1_uart3_apps_clk.c, GCC_BASE, 0x0095},
{&gcc_usb30_mock_utmi_clk.c, GCC_BASE, 0x0052},
- {&gcc_usb_hs_ahb_clk.c, GCC_BASE, 0x0069},
- {&gcc_usb_hs_system_clk.c, GCC_BASE, 0x0068},
- {&gcc_usb_hsic_ahb_clk.c, GCC_BASE, 0x0060},
- {&gcc_usb_hsic_clk.c, GCC_BASE, 0x0062},
- {&gcc_usb_hsic_io_cal_clk.c, GCC_BASE, 0x0063},
- {&gcc_usb_hsic_system_clk.c, GCC_BASE, 0x0061},
+ {&gcc_ce1_axi_clk.c, GCC_BASE, 0x0139},
+ {&gcc_sdcc4_apps_clk.c, GCC_BASE, 0x0080},
+ {&gcc_blsp1_qup5_spi_apps_clk.c, GCC_BASE, 0x009c},
+ {&gcc_usb_hs_ahb_clk.c, GCC_BASE, 0x0061},
+ {&gcc_blsp1_qup6_spi_apps_clk.c, GCC_BASE, 0x00a1},
+ {&gcc_blsp2_qup2_i2c_apps_clk.c, GCC_BASE, 0x00b0},
+ {&gcc_prng_ahb_clk.c, GCC_BASE, 0x00d8},
+ {&gcc_blsp1_qup3_i2c_apps_clk.c, GCC_BASE, 0x0094},
+ {&gcc_usb_hsic_clk.c, GCC_BASE, 0x005a},
+ {&gcc_blsp1_uart6_apps_clk.c, GCC_BASE, 0x00a3},
+ {&gcc_sdcc2_apps_clk.c, GCC_BASE, 0x0070},
+ {&gcc_tsif_ref_clk.c, GCC_BASE, 0x00e9},
+ {&gcc_blsp1_uart1_apps_clk.c, GCC_BASE, 0x008c},
+ {&gcc_blsp2_qup5_spi_apps_clk.c, GCC_BASE, 0x00bc},
+ {&gcc_blsp1_qup4_i2c_apps_clk.c, GCC_BASE, 0x0099},
+ {&gcc_mmss_noc_cfg_ahb_clk.c, GCC_BASE, 0x002a},
+ {&gcc_blsp2_ahb_clk.c, GCC_BASE, 0x00a8},
+ {&gcc_boot_rom_ahb_clk.c, GCC_BASE, 0x00f8},
+ {&gcc_ce1_ahb_clk.c, GCC_BASE, 0x013a},
+ {&gcc_pdm2_clk.c, GCC_BASE, 0x00d2},
+ {&gcc_blsp2_qup4_i2c_apps_clk.c, GCC_BASE, 0x00b9},
+ {&gcc_ce2_ahb_clk.c, GCC_BASE, 0x0142},
+ {&gcc_blsp1_uart5_apps_clk.c, GCC_BASE, 0x009e},
+ {&gcc_blsp2_qup1_spi_apps_clk.c, GCC_BASE, 0x00aa},
+ {&gcc_blsp1_qup2_i2c_apps_clk.c, GCC_BASE, 0x0090},
+ {&gcc_blsp2_uart1_apps_clk.c, GCC_BASE, 0x00ac},
+ {&gcc_blsp1_qup1_i2c_apps_clk.c, GCC_BASE, 0x008b},
+ {&gcc_blsp2_uart6_apps_clk.c, GCC_BASE, 0x00c3},
+ {&gcc_sdcc2_ahb_clk.c, GCC_BASE, 0x0071},
+ {&gcc_ocmem_noc_cfg_ahb_clk.c, GCC_BASE, 0x0029},
+ {&gcc_ce1_clk.c, GCC_BASE, 0x0138},
{&mmss_mmssnoc_ahb_clk.c, MMSS_BASE, 0x0001},
{&mmss_mmssnoc_axi_clk.c, MMSS_BASE, 0x0004},
{&ocmemnoc_clk.c, MMSS_BASE, 0x0007},
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index ac26acf..1f0bd2c 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -1837,6 +1837,8 @@
{ .name = "lut_clk" },
{ .name = "tv_src_clk" },
{ .name = "tv_clk" },
+ { .name = "reset1_clk" },
+ { .name = "reset2_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 7cb6e95..fa24ba9 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -425,6 +425,7 @@
{ .name = "lut_clk" },
{ .name = "tv_src_clk" },
{ .name = "tv_clk" },
+ { .name = "reset1_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 3d1926c..724eed8 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -2151,6 +2151,8 @@
{ .name = "lut_clk" },
{ .name = "tv_src_clk" },
{ .name = "tv_clk" },
+ { .name = "reset1_clk" },
+ { .name = "reset2_clk" },
{ 0 }
},
.bus_port0 = MSM_BUS_MASTER_MDP_PORT0,
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 8fef953..2642864 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -1661,18 +1661,23 @@
/* Part number for 1GHz part */
case 0x770:
case 0x771:
+ case 0x77C:
case 0x780:
+ case 0x8D0:
cpu = MSM8625;
break;
/* Part number for 1.2GHz part */
case 0x773:
case 0x774:
case 0x781:
+ case 0x8D1:
cpu = MSM8625A;
break;
case 0x775:
case 0x776:
+ case 0x77D:
case 0x782:
+ case 0x8D2:
cpu = MSM8625AB;
break;
default:
diff --git a/arch/arm/mach-msm/include/mach/mdm2.h b/arch/arm/mach-msm/include/mach/mdm2.h
index c4877cc..6ec12c1 100644
--- a/arch/arm/mach-msm/include/mach/mdm2.h
+++ b/arch/arm/mach-msm/include/mach/mdm2.h
@@ -32,6 +32,7 @@
struct platform_device *peripheral_platform_device;
const unsigned int ramdump_timeout_ms;
int image_upgrade_supported;
+ struct gpiomux_setting *mdm2ap_status_gpio_run_cfg;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index bf92f7d..6b7ad9a 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -78,6 +78,7 @@
int platform_physical_remove_pages(u64, u64);
int platform_physical_active_pages(u64, u64);
int platform_physical_low_power_pages(u64, u64);
+int msm_get_memory_type_from_name(const char *memtype_name);
extern int (*change_memory_power)(u64, u64, int);
diff --git a/arch/arm/mach-msm/include/mach/msm_hsusb.h b/arch/arm/mach-msm/include/mach/msm_hsusb.h
index 4f140cc..4e22b0f 100644
--- a/arch/arm/mach-msm/include/mach/msm_hsusb.h
+++ b/arch/arm/mach-msm/include/mach/msm_hsusb.h
@@ -202,8 +202,4 @@
struct clk *ebi1_clk;
};
-int msm_ep_config(struct usb_ep *ep);
-int msm_ep_unconfig(struct usb_ep *ep);
-int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size);
-
#endif
diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h
index ec135a3..47313a7 100644
--- a/arch/arm/mach-msm/include/mach/usb_bam.h
+++ b/arch/arm/mach-msm/include/mach/usb_bam.h
@@ -12,6 +12,7 @@
#ifndef _USB_BAM_H_
#define _USB_BAM_H_
+#include "sps.h"
/**
* SPS Pipes direction.
@@ -43,7 +44,7 @@
* @return 0 on success, negative value on error
*
*/
-int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx);
+int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx);
/**
* Register a wakeup callback from peer BAM.
@@ -57,8 +58,40 @@
*/
int usb_bam_register_wake_cb(u8 idx,
int (*callback)(void *), void* param);
+
+/**
+ * Disconnect USB-to-Peripheral SPS connection.
+ *
+ * @idx - Connection index.
+ *
+ * @return 0 on success, negative value on error
+ */
+int usb_bam_disconnect_pipe(u8 idx);
+
+/**
+ * Returns usb bam connection parameters.
+ *
+ * @conn_idx - Connection index.
+ *
+ * @pipe_dir - USB pipe direction to/from the peripheral.
+ *
+ * @usb_bam_handle - Usb bam handle.
+ *
+ * @usb_bam_pipe_idx - Usb bam pipe index.
+ *
+ * @peer_pipe_idx - Peer pipe index.
+ *
+ * @desc_fifo - Descriptor fifo parameters.
+ *
+ * @data_fifo - Data fifo parameters.
+ *
+ */
+void get_bam2bam_connection_info(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+ u32 *usb_bam_handle, u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+ struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo);
+
#else
-static inline int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx)
+static inline int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx)
{
return -ENODEV;
}
@@ -68,6 +101,18 @@
{
return -ENODEV;
}
+
+static inline int usb_bam_disconnect_pipe(u8 idx)
+{
+ return -ENODEV;
+}
+
+static inline void get_bam2bam_connection_info(u8 conn_idx,
+ enum usb_bam_pipe_dir pipe_dir, u32 *usb_bam_handle,
+ u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+ struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo)
+{
+ return;
+}
#endif
#endif /* _USB_BAM_H_ */
-
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index 6b40cda..d1865e4 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -30,6 +30,7 @@
#include <linux/workqueue.h>
#include <linux/clk.h>
#include <linux/mfd/pmic8058.h>
+#include <linux/msm_charm.h>
#include <asm/mach-types.h>
#include <asm/uaccess.h>
#include <mach/mdm2.h>
@@ -37,7 +38,7 @@
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/rpm.h>
-#include <linux/msm_charm.h>
+#include <mach/gpiomux.h>
#include "msm_watchdog.h"
#include "mdm_private.h"
#include "sysmon.h"
@@ -68,6 +69,13 @@
#define SFR_MAX_RETRIES 10
#define SFR_RETRY_INTERVAL 1000
+enum gpio_update_config {
+ GPIO_UPDATE_BOOTING_CONFIG = 1,
+ GPIO_UPDATE_RUNNING_CONFIG,
+};
+static int mdm2ap_status_valid_old_config;
+static struct gpiomux_setting mdm2ap_status_old_config;
+
static irqreturn_t mdm_vddmin_change(int irq, void *dev_id)
{
int value = gpio_get_value(
@@ -163,6 +171,37 @@
static DECLARE_DELAYED_WORK(mdm2ap_status_check_work, mdm2ap_status_check);
+static void mdm_update_gpio_configs(enum gpio_update_config gpio_config)
+{
+	/* Some gpio configurations may need updating after modem bootup. */
+ switch (gpio_config) {
+ case GPIO_UPDATE_RUNNING_CONFIG:
+ if (mdm_drv->pdata->mdm2ap_status_gpio_run_cfg) {
+ if (msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
+ GPIOMUX_ACTIVE,
+ mdm_drv->pdata->mdm2ap_status_gpio_run_cfg,
+ &mdm2ap_status_old_config))
+ pr_err("%s: failed updating running gpio config\n",
+ __func__);
+ else
+ mdm2ap_status_valid_old_config = 1;
+ }
+ break;
+ case GPIO_UPDATE_BOOTING_CONFIG:
+ if (mdm2ap_status_valid_old_config) {
+ msm_gpiomux_write(mdm_drv->mdm2ap_status_gpio,
+ GPIOMUX_ACTIVE,
+ &mdm2ap_status_old_config,
+ NULL);
+ mdm2ap_status_valid_old_config = 0;
+ }
+ break;
+ default:
+ pr_err("%s: called with no config\n", __func__);
+ break;
+ }
+}
+
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -205,11 +244,10 @@
else
first_boot = 0;
- /* Start a timer to check that the mdm2ap_status gpio
- * goes high.
+ /* If successful, start a timer to check that the mdm2ap_status
+ * gpio goes high.
*/
-
- if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
+ if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
schedule_delayed_work(&mdm2ap_status_check_work,
msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
break;
@@ -266,6 +304,9 @@
pr_debug("%s: status:%d\n", __func__, value);
if (mdm_drv->mdm_ready && mdm_drv->ops->status_cb)
mdm_drv->ops->status_cb(mdm_drv, value);
+
+ /* Update gpio configuration to "running" config. */
+ mdm_update_gpio_configs(GPIO_UPDATE_RUNNING_CONFIG);
}
static DECLARE_WORK(mdm_status_work, mdm_status_fn);
@@ -364,6 +405,7 @@
static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys)
{
mdm_drv->mdm_ready = 0;
+ cancel_delayed_work(&mdm2ap_status_check_work);
gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
if (mdm_drv->pdata->ramdump_delay_ms > 0) {
/* Wait for the external modem to complete
@@ -371,10 +413,13 @@
*/
msleep(mdm_drv->pdata->ramdump_delay_ms);
}
- if (!mdm_drv->mdm_unexpected_reset_occurred)
+ if (!mdm_drv->mdm_unexpected_reset_occurred) {
mdm_drv->ops->reset_mdm_cb(mdm_drv);
- else
+ /* Update gpio configuration to "booting" config. */
+ mdm_update_gpio_configs(GPIO_UPDATE_BOOTING_CONFIG);
+ } else {
mdm_drv->mdm_unexpected_reset_occurred = 0;
+ }
return 0;
}
@@ -404,6 +449,7 @@
const struct subsys_desc *crashed_subsys)
{
mdm_drv->mdm_ram_dump_status = 0;
+ cancel_delayed_work(&mdm2ap_status_check_work);
if (want_dumps) {
mdm_drv->boot_type = CHARM_RAM_DUMPS;
complete(&mdm_needs_reload);
@@ -416,8 +462,11 @@
pr_info("%s: mdm modem ramdumps completed.\n",
__func__);
INIT_COMPLETION(mdm_ram_dumps);
- if (!mdm_drv->pdata->no_powerdown_after_ramdumps)
+ if (!mdm_drv->pdata->no_powerdown_after_ramdumps) {
mdm_drv->ops->power_down_mdm_cb(mdm_drv);
+ /* Update gpio configuration to "booting" config. */
+ mdm_update_gpio_configs(GPIO_UPDATE_BOOTING_CONFIG);
+ }
}
return mdm_drv->mdm_ram_dump_status;
}
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
index 63c2d3a..4a2fd7c 100644
--- a/arch/arm/mach-msm/memory.c
+++ b/arch/arm/mach-msm/memory.c
@@ -390,24 +390,33 @@
[MEMTYPE_EBI1] = "EBI1",
};
-static int reserve_memory_type(char *mem_name,
- struct memtype_reserve *reserve_table,
- int size)
+int msm_get_memory_type_from_name(const char *memtype_name)
{
int i;
for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
- if (memtype_names[i] && strcmp(mem_name,
- memtype_names[i]) == 0) {
- reserve_table[i].size += size;
- return 0;
- }
+ if (memtype_names[i] &&
+ strcmp(memtype_name, memtype_names[i]) == 0)
+ return i;
}
- pr_err("Could not find memory type %s\n", mem_name);
+ pr_err("Could not find memory type %s\n", memtype_name);
return -EINVAL;
}
+static int reserve_memory_type(const char *mem_name,
+ struct memtype_reserve *reserve_table,
+ int size)
+{
+ int ret = msm_get_memory_type_from_name(mem_name);
+
+ if (ret >= 0) {
+ reserve_table[ret].size += size;
+ ret = 0;
+ }
+ return ret;
+}
+
static int check_for_compat(unsigned long node)
{
char **start = __compat_exports_start;
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index ff0e792d..5c9c3c4 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -237,7 +237,7 @@
if (IS_ERR(drv->vreg))
return PTR_ERR(drv->vreg);
- ret = regulator_set_voltage(drv->vreg, 1150000, 1150000);
+ ret = regulator_set_voltage(drv->vreg, 1050000, 1050000);
if (ret)
dev_err(&pdev->dev, "Failed to set regulator's voltage.\n");
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index eec3fe0..c65a000 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -18,15 +18,82 @@
#include <linux/slab.h>
#include <linux/memory_alloc.h>
#include <linux/fmem.h>
+#include <linux/of.h>
#include <mach/ion.h>
#include <mach/msm_memtypes.h>
#include "../ion_priv.h"
#include "ion_cp_common.h"
+#define ION_COMPAT_STR "qcom,msm-ion"
+#define ION_COMPAT_MEM_RESERVE_STR "qcom,msm-ion-reserve"
+
static struct ion_device *idev;
static int num_heaps;
static struct ion_heap **heaps;
+struct ion_heap_desc {
+ unsigned int id;
+ enum ion_heap_type type;
+ const char *name;
+ unsigned int permission_type;
+};
+
+
+static struct ion_heap_desc ion_heap_meta[] = {
+ {
+ .id = ION_SYSTEM_HEAP_ID,
+ .type = ION_HEAP_TYPE_SYSTEM,
+ .name = ION_VMALLOC_HEAP_NAME,
+ },
+ {
+ .id = ION_CP_MM_HEAP_ID,
+ .type = ION_HEAP_TYPE_CP,
+ .name = ION_MM_HEAP_NAME,
+ .permission_type = IPT_TYPE_MM_CARVEOUT,
+ },
+ {
+ .id = ION_MM_FIRMWARE_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_MM_FIRMWARE_HEAP_NAME,
+ },
+ {
+ .id = ION_CP_MFC_HEAP_ID,
+ .type = ION_HEAP_TYPE_CP,
+ .name = ION_MFC_HEAP_NAME,
+ .permission_type = IPT_TYPE_MFC_SHAREDMEM,
+ },
+ {
+ .id = ION_SF_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_SF_HEAP_NAME,
+ },
+ {
+ .id = ION_IOMMU_HEAP_ID,
+ .type = ION_HEAP_TYPE_IOMMU,
+ .name = ION_IOMMU_HEAP_NAME,
+ },
+ {
+ .id = ION_QSECOM_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_QSECOM_HEAP_NAME,
+ },
+ {
+ .id = ION_AUDIO_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_AUDIO_HEAP_NAME,
+ },
+ {
+ .id = ION_CP_WB_HEAP_ID,
+ .type = ION_HEAP_TYPE_CP,
+ .name = ION_WB_HEAP_NAME,
+ },
+ {
+ .id = ION_CAMERA_HEAP_ID,
+ .type = ION_HEAP_TYPE_CARVEOUT,
+ .name = ION_CAMERA_HEAP_NAME,
+ },
+};
+
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
const char *name)
{
@@ -269,11 +336,243 @@
}
}
+static int msm_init_extra_data(struct ion_platform_heap *heap,
+ const struct ion_heap_desc *heap_desc)
+{
+ int ret = 0;
+
+ switch (heap->type) {
+ case ION_HEAP_TYPE_CP:
+ {
+ heap->extra_data = kzalloc(sizeof(struct ion_cp_heap_pdata),
+ GFP_KERNEL);
+ if (!heap->extra_data) {
+ ret = -ENOMEM;
+ } else {
+ struct ion_cp_heap_pdata *extra = heap->extra_data;
+ extra->permission_type = heap_desc->permission_type;
+ }
+ break;
+ }
+ case ION_HEAP_TYPE_CARVEOUT:
+ {
+ heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata),
+ GFP_KERNEL);
+ if (!heap->extra_data)
+ ret = -ENOMEM;
+ break;
+ }
+ default:
+ heap->extra_data = 0;
+ break;
+ }
+ return ret;
+}
+
+static int msm_ion_populate_heap(struct ion_platform_heap *heap)
+{
+ unsigned int i;
+ int ret = -EINVAL;
+ unsigned int len = ARRAY_SIZE(ion_heap_meta);
+ for (i = 0; i < len; ++i) {
+ if (ion_heap_meta[i].id == heap->id) {
+ heap->name = ion_heap_meta[i].name;
+ heap->type = ion_heap_meta[i].type;
+ ret = msm_init_extra_data(heap, &ion_heap_meta[i]);
+ break;
+ }
+ }
+ if (ret)
+ pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
+ return ret;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+ unsigned int i;
+ for (i = 0; i < pdata->nr; ++i)
+ kfree(pdata->heaps[i].extra_data);
+ kfree(pdata);
+}
+
+static int memtype_to_ion_memtype[] = {
+ [MEMTYPE_SMI_KERNEL] = ION_SMI_TYPE,
+ [MEMTYPE_SMI] = ION_SMI_TYPE,
+ [MEMTYPE_EBI0] = ION_EBI_TYPE,
+ [MEMTYPE_EBI1] = ION_EBI_TYPE,
+};
+
+static void msm_ion_get_heap_align(struct device_node *node,
+ struct ion_platform_heap *heap)
+{
+ unsigned int val;
+
+ int ret = of_property_read_u32(node, "qcom,heap-align", &val);
+ if (!ret) {
+ switch (heap->type) {
+ case ION_HEAP_TYPE_CP:
+ {
+ struct ion_cp_heap_pdata *extra =
+ heap->extra_data;
+ extra->align = val;
+ break;
+ }
+ case ION_HEAP_TYPE_CARVEOUT:
+ {
+ struct ion_co_heap_pdata *extra =
+ heap->extra_data;
+ extra->align = val;
+ break;
+ }
+ default:
+ pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n",
+ heap->name);
+ break;
+ }
+ }
+}
+
+static int msm_ion_get_heap_size(struct device_node *node,
+ struct ion_platform_heap *heap)
+{
+ unsigned int val;
+ int ret = 0;
+ const char *memory_name_prop;
+
+ ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
+ if (!ret) {
+ heap->size = val;
+ ret = of_property_read_string(node,
+ "qcom,memory-reservation-type",
+ &memory_name_prop);
+
+ if (!ret && memory_name_prop) {
+ val = msm_get_memory_type_from_name(memory_name_prop);
+ if (val < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ heap->memory_type = memtype_to_ion_memtype[val];
+ }
+ if (heap->size && (ret || !memory_name_prop)) {
+ pr_err("%s: Need to specify reservation type\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ } else {
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+
+static void msm_ion_get_heap_adjacent(struct device_node *node,
+ struct ion_platform_heap *heap)
+{
+ unsigned int val;
+ int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val);
+ if (!ret) {
+ switch (heap->type) {
+ case ION_HEAP_TYPE_CARVEOUT:
+ {
+ struct ion_co_heap_pdata *extra = heap->extra_data;
+ extra->adjacent_mem_id = val;
+ break;
+ }
+ default:
+			pr_err("ION-heap %s: Cannot specify adjacent mem id for this type of heap\n",
+ heap->name);
+ break;
+ }
+ } else {
+ switch (heap->type) {
+ case ION_HEAP_TYPE_CARVEOUT:
+ {
+ struct ion_co_heap_pdata *extra = heap->extra_data;
+ extra->adjacent_mem_id = INVALID_HEAP_ID;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+static struct ion_platform_data *msm_ion_parse_dt(
+ const struct device_node *dt_node)
+{
+ struct ion_platform_data *pdata = 0;
+ struct device_node *node;
+ uint32_t val = 0;
+ int ret = 0;
+ uint32_t num_heaps = 0;
+ int idx = 0;
+
+ for_each_child_of_node(dt_node, node)
+ num_heaps++;
+
+ if (!num_heaps)
+ return ERR_PTR(-EINVAL);
+
+ pdata = kzalloc(sizeof(struct ion_platform_data) +
+ num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->nr = num_heaps;
+
+ for_each_child_of_node(dt_node, node) {
+ /**
+ * TODO: Replace this with of_get_address() when this patch
+ * gets merged: http://
+ * permalink.gmane.org/gmane.linux.drivers.devicetree/18614
+ */
+ ret = of_property_read_u32(node, "reg", &val);
+ if (ret) {
+ pr_err("%s: Unable to find reg key", __func__);
+ goto free_heaps;
+ }
+ pdata->heaps[idx].id = val;
+
+ ret = msm_ion_populate_heap(&pdata->heaps[idx]);
+ if (ret)
+ goto free_heaps;
+
+ msm_ion_get_heap_align(node, &pdata->heaps[idx]);
+
+ ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
+ if (ret)
+ goto free_heaps;
+
+ msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]);
+
+ ++idx;
+ }
+ return pdata;
+
+free_heaps:
+ free_pdata(pdata);
+ return ERR_PTR(ret);
+}
+
static int msm_ion_probe(struct platform_device *pdev)
{
- struct ion_platform_data *pdata = pdev->dev.platform_data;
- int err;
+ struct ion_platform_data *pdata;
+ unsigned int pdata_needs_to_be_freed;
+ int err = -1;
int i;
+ if (pdev->dev.of_node) {
+ pdata = msm_ion_parse_dt(pdev->dev.of_node);
+ if (IS_ERR(pdata)) {
+ err = PTR_ERR(pdata);
+ goto out;
+ }
+ pdata_needs_to_be_freed = 1;
+ } else {
+ pdata = pdev->dev.platform_data;
+ pdata_needs_to_be_freed = 0;
+ }
num_heaps = pdata->nr;
@@ -315,6 +614,8 @@
ion_device_add_heap(idev, heaps[i]);
}
+ if (pdata_needs_to_be_freed)
+ free_pdata(pdata);
check_for_heap_overlap(pdata->heaps, num_heaps);
platform_set_drvdata(pdev, idev);
@@ -322,6 +623,8 @@
freeheaps:
kfree(heaps);
+ if (pdata_needs_to_be_freed)
+ free_pdata(pdata);
out:
return err;
}
@@ -339,10 +642,19 @@
return 0;
}
+static struct of_device_id msm_ion_match_table[] = {
+ {.compatible = ION_COMPAT_STR},
+ {},
+};
+EXPORT_COMPAT(ION_COMPAT_MEM_RESERVE_STR);
+
static struct platform_driver msm_ion_driver = {
.probe = msm_ion_probe,
.remove = msm_ion_remove,
- .driver = { .name = "ion-msm" }
+ .driver = {
+ .name = "ion-msm",
+ .of_match_table = msm_ion_match_table,
+ },
};
static int __init msm_ion_init(void)
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index 8ec9431..33fcbfd 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -248,6 +248,8 @@
#define A3XX_VBIF_OUT_WR_LIM_CONF0 0x3035
#define A3XX_VBIF_DDR_OUT_MAX_BURST 0x3036
#define A3XX_VBIF_ARB_CTL 0x303C
+#define A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x3049
+#define A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x3058
#define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
#define A3XX_VBIF_OUT_AXI_AOOO 0x305F
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 3047693..f7d1e59 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -15,8 +15,14 @@
#include <linux/vmalloc.h>
#include <linux/ioctl.h>
#include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <mach/socinfo.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_dcvs.h>
+#include <mach/msm_dcvs_scm.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
@@ -178,7 +184,9 @@
{ ADRENO_REV_A320, 3, 2, 0, ANY_ID,
"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
512, 0, 2, SZ_512K },
-
+ { ADRENO_REV_A330, 3, 3, 0, 0,
+ "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_1M },
};
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
@@ -647,12 +655,520 @@
adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
}
+static struct platform_device_id adreno_id_table[] = {
+ { DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct of_device_id adreno_match_table[] = {
+ { .compatible = "qcom,kgsl-3d0", },
+ {}
+};
+
+static inline int adreno_of_read_property(struct device_node *node,
+ const char *prop, unsigned int *ptr)
+{
+ int ret = of_property_read_u32(node, prop, ptr);
+ if (ret)
+ KGSL_CORE_ERR("Unable to read '%s'\n", prop);
+ return ret;
+}
+
+static struct device_node *adreno_of_find_subnode(struct device_node *parent,
+ const char *name)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(parent, child) {
+ if (of_device_is_compatible(child, name))
+ return child;
+ }
+
+ return NULL;
+}
+
+static int adreno_of_get_pwrlevels(struct device_node *parent,
+ struct kgsl_device_platform_data *pdata)
+{
+ struct device_node *node, *child;
+ int ret = -EINVAL;
+
+ node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");
+
+ if (node == NULL) {
+ KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
+ return -EINVAL;
+ }
+
+ pdata->num_levels = 0;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+ struct kgsl_pwrlevel *level;
+
+ if (adreno_of_read_property(child, "reg", &index))
+ goto done;
+
+ if (index >= KGSL_MAX_PWRLEVELS) {
+ KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
+ index);
+ continue;
+ }
+
+ if (index >= pdata->num_levels)
+ pdata->num_levels = index + 1;
+
+ level = &pdata->pwrlevel[index];
+
+ if (adreno_of_read_property(child, "qcom,gpu-freq",
+ &level->gpu_freq))
+ goto done;
+
+ if (adreno_of_read_property(child, "qcom,bus-freq",
+ &level->bus_freq))
+ goto done;
+
+ if (adreno_of_read_property(child, "qcom,io-fraction",
+ &level->io_fraction))
+ level->io_fraction = 0;
+ }
+
+ if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
+ &pdata->init_level))
+ pdata->init_level = 1;
+
+ if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
+ KGSL_CORE_ERR("Initial power level out of range\n");
+ pdata->init_level = 1;
+ }
+
+ ret = 0;
+done:
+ return ret;
+
+}
+static void adreno_of_free_bus_scale_info(struct msm_bus_scale_pdata *pdata)
+{
+ int i;
+
+ if (pdata == NULL)
+ return;
+
+ for (i = 0; pdata->usecase && i < pdata->num_usecases; i++)
+ kfree(pdata->usecase[i].vectors);
+
+ kfree(pdata->usecase);
+ kfree(pdata);
+}
+
+struct msm_bus_scale_pdata *adreno_of_get_bus_scale(struct device_node *node)
+{
+ static int bus_vectors_src[3] = {MSM_BUS_MASTER_GRAPHICS_3D,
+ MSM_BUS_MASTER_GRAPHICS_3D_PORT1, MSM_BUS_MASTER_V_OCMEM_GFX3D};
+ static int bus_vectors_dst[2] = {MSM_BUS_SLAVE_EBI_CH0,
+ MSM_BUS_SLAVE_OCMEM};
+ const unsigned int *vectors;
+ struct msm_bus_scale_pdata *pdata;
+ int i, j, len, num_paths;
+ int ret = -EINVAL;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+ if (!pdata) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (adreno_of_read_property(node, "qcom,grp3d-num-bus-scale-usecases",
+ &pdata->num_usecases)) {
+ pdata->num_usecases = 0;
+ goto err;
+ }
+
+ pdata->usecase = kzalloc(pdata->num_usecases *
+ sizeof(struct msm_bus_paths), GFP_KERNEL);
+
+ if (pdata->usecase == NULL) {
+ KGSL_CORE_ERR("kzalloc (%d) failed\n",
+ pdata->num_usecases * sizeof(struct msm_bus_paths));
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (adreno_of_read_property(node, "qcom,grp3d-num-vectors-per-usecase",
+ &num_paths))
+ goto err;
+
+ vectors = of_get_property(node, "qcom,grp3d-vectors", &len);
+
+ if (len != pdata->num_usecases * num_paths *
+ sizeof(struct msm_bus_vectors)) {
+ KGSL_CORE_ERR("Invalid size for the bus scale vectors\n");
+ goto err;
+ }
+
+ for (i = 0; i < pdata->num_usecases; i++) {
+ pdata->usecase[i].num_paths = num_paths;
+ pdata->usecase[i].vectors = kzalloc(num_paths *
+ sizeof(struct msm_bus_vectors),
+ GFP_KERNEL);
+ if (!pdata->usecase[i].vectors) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ num_paths * sizeof(struct msm_bus_vectors));
+ ret = -ENOMEM;
+ goto err;
+ }
+ for (j = 0; j < num_paths; j++) {
+ int index = (i * num_paths + j) * 4;
+ pdata->usecase[i].vectors[j].src =
+ bus_vectors_src[be32_to_cpu(vectors[index])];
+ pdata->usecase[i].vectors[j].dst =
+ bus_vectors_dst[
+ be32_to_cpu(vectors[index + 1])];
+ pdata->usecase[i].vectors[j].ab =
+ be32_to_cpu(vectors[index + 2]);
+ pdata->usecase[i].vectors[j].ib =
+ KGSL_CONVERT_TO_MBPS(
+ be32_to_cpu(vectors[index + 3]));
+ }
+ }
+
+ pdata->name = "grp3d";
+
+ return pdata;
+
+err:
+ adreno_of_free_bus_scale_info(pdata);
+
+ return ERR_PTR(ret);
+}
+
+static struct msm_dcvs_core_info *adreno_of_get_dcvs(struct device_node *parent)
+{
+ struct device_node *node, *child;
+ struct msm_dcvs_core_info *info = NULL;
+ int count = 0;
+ int ret = -EINVAL;
+
+ node = adreno_of_find_subnode(parent, "qcom,dcvs-core-info");
+ if (node == NULL)
+ return ERR_PTR(-EINVAL);
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (info == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*info));
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for_each_child_of_node(node, child)
+ count++;
+
+ info->core_param.num_freq = count;
+
+ info->freq_tbl = kzalloc(info->core_param.num_freq *
+ sizeof(struct msm_dcvs_freq_entry),
+ GFP_KERNEL);
+
+ if (info->freq_tbl == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ info->core_param.num_freq *
+ sizeof(struct msm_dcvs_freq_entry));
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (adreno_of_read_property(child, "reg", &index))
+ goto err;
+
+ if (index >= info->core_param.num_freq) {
+ KGSL_CORE_ERR("DCVS freq entry %d is out of range\n",
+ index);
+ continue;
+ }
+
+ if (adreno_of_read_property(child, "qcom,freq",
+ &info->freq_tbl[index].freq))
+ goto err;
+
+ if (adreno_of_read_property(child, "qcom,idle-energy",
+ &info->freq_tbl[index].idle_energy))
+ info->freq_tbl[index].idle_energy = 0;
+
+ if (adreno_of_read_property(child, "qcom,active-energy",
+ &info->freq_tbl[index].active_energy))
+ info->freq_tbl[index].active_energy = 0;
+ }
+
+ if (adreno_of_read_property(node, "qcom,core-max-time-us",
+ &info->core_param.max_time_us))
+ goto err;
+
+ if (adreno_of_read_property(node, "qcom,algo-slack-time-us",
+ &info->algo_param.slack_time_us))
+ goto err;
+
+ if (adreno_of_read_property(node, "qcom,algo-disable-pc-threshold",
+ &info->algo_param.disable_pc_threshold))
+ goto err;
+
+ if (adreno_of_read_property(node, "qcom,algo-ss-window-size",
+ &info->algo_param.ss_window_size))
+ goto err;
+
+ if (adreno_of_read_property(node, "qcom,algo-ss-util-pct",
+ &info->algo_param.ss_util_pct))
+ goto err;
+
+ if (adreno_of_read_property(node, "qcom,algo-em-max-util-pct",
+ &info->algo_param.em_max_util_pct))
+ goto err;
+
+ if (adreno_of_read_property(node, "qcom,algo-ss-iobusy-conv",
+ &info->algo_param.ss_iobusy_conv))
+ goto err;
+
+ return info;
+
+err:
+ if (info)
+ kfree(info->freq_tbl);
+
+ kfree(info);
+
+ return ERR_PTR(ret);
+}
+
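+/*
+ * Sketch of the IOMMU reference this parser expects (inferred from the
+ * code below): the GPU node carries an "iommu" phandle to a node whose
+ * "reg" describes the register window and whose children each provide a
+ * "label" string and a "qcom,iommu-ctx-sids" value.
+ */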
+static int adreno_of_get_iommu(struct device_node *parent,
+ struct kgsl_device_platform_data *pdata)
+{
+ struct device_node *node, *child;
+ struct kgsl_device_iommu_data *data = NULL;
+ struct kgsl_iommu_ctx *ctxs = NULL;
+ u32 reg_val[2];
+ int ctx_index = 0;
+
+ node = of_parse_phandle(parent, "iommu", 0);
+ if (node == NULL)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*data));
+ goto err;
+ }
+
+ if (of_property_read_u32_array(node, "reg", reg_val, 2))
+ goto err;
+
+ data->physstart = reg_val[0];
+ data->physend = data->physstart + reg_val[1] - 1;
+
+ data->iommu_ctx_count = 0;
+
+ for_each_child_of_node(node, child)
+ data->iommu_ctx_count++;
+
+ ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
+ GFP_KERNEL);
+
+ if (ctxs == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
+ goto err;
+ }
+
+ for_each_child_of_node(node, child) {
+ int ret = of_property_read_string(child, "label",
+ &ctxs[ctx_index].iommu_ctx_name);
+
+ if (ret) {
+ KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
+ goto err;
+ }
+
+ if (adreno_of_read_property(child, "qcom,iommu-ctx-sids",
+ &ctxs[ctx_index].ctx_id))
+ goto err;
+
+ ctx_index++;
+ }
+
+ data->iommu_ctxs = ctxs;
+
+ pdata->iommu_data = data;
+ pdata->iommu_count = 1;
+
+ return 0;
+
+err:
+ kfree(ctxs);
+ kfree(data);
+
+ return -EINVAL;
+}
+
+static int adreno_of_get_pdata(struct platform_device *pdev)
+{
+ struct kgsl_device_platform_data *pdata = NULL;
+ struct kgsl_device *device;
+ int ret = -EINVAL;
+
+ pdev->id_entry = adreno_id_table;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ return 0;
+
+ if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
+ KGSL_CORE_ERR("Unable to read 'label'\n");
+ goto err;
+ }
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
+ goto err;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (pdata == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
+ &pdata->chipid))
+ goto err;
+
+ /* pwrlevel Data */
+ ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
+ if (ret)
+ goto err;
+
+	/* Default value is 83 if not found in DT */
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
+ &pdata->idle_timeout))
+ pdata->idle_timeout = 83;
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,nap-allowed",
+ &pdata->nap_allowed))
+ pdata->nap_allowed = 1;
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
+ &pdata->clk_map))
+ goto err;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+ if (device->id != KGSL_DEVICE_3D0)
+ goto err;
+
+ /* Bus Scale Data */
+
+ pdata->bus_scale_table = adreno_of_get_bus_scale(pdev->dev.of_node);
+ if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
+ ret = PTR_ERR(pdata->bus_scale_table);
+ goto err;
+ }
+
+ pdata->core_info = adreno_of_get_dcvs(pdev->dev.of_node);
+ if (IS_ERR_OR_NULL(pdata->core_info)) {
+ ret = PTR_ERR(pdata->core_info);
+ goto err;
+ }
+
+ ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
+ if (ret)
+ goto err;
+
+ pdev->dev.platform_data = pdata;
+ return 0;
+
+err:
+ if (pdata) {
+ adreno_of_free_bus_scale_info(pdata->bus_scale_table);
+ if (pdata->core_info)
+ kfree(pdata->core_info->freq_tbl);
+ kfree(pdata->core_info);
+
+ if (pdata->iommu_data)
+ kfree(pdata->iommu_data->iommu_ctxs);
+
+ kfree(pdata->iommu_data);
+ }
+
+ kfree(pdata);
+
+ return ret;
+}
+
+#ifdef CONFIG_MSM_OCMEM
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+ if (adreno_dev->gpurev != ADRENO_REV_A330)
+ return 0;
+
+	/* OCMEM is only needed once; consecutive allocations are not supported */
+ if (adreno_dev->ocmem_hdl != NULL)
+ return 0;
+
+ adreno_dev->ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
+ if (adreno_dev->ocmem_hdl == NULL)
+ return -ENOMEM;
+
+ adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
+ adreno_dev->gmem_base = adreno_dev->ocmem_hdl->addr;
+
+ return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+ if (adreno_dev->gpurev != ADRENO_REV_A330)
+ return;
+
+ if (adreno_dev->ocmem_hdl == NULL)
+ return;
+
+ ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
+ adreno_dev->ocmem_hdl = NULL;
+}
+#else
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+ return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+}
+#endif
+
static int __devinit
adreno_probe(struct platform_device *pdev)
{
struct kgsl_device *device;
struct adreno_device *adreno_dev;
int status = -EINVAL;
+ bool is_dt;
+
+ is_dt = of_match_device(adreno_match_table, &pdev->dev);
+
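+	/* When probed via devicetree, build the platform data from the node */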
+ if (is_dt && pdev->dev.of_node) {
+ status = adreno_of_get_pdata(pdev);
+ if (status)
+ goto error_return;
+ }
device = (struct kgsl_device *)pdev->id_entry->driver_data;
adreno_dev = ADRENO_DEVICE(device);
@@ -678,6 +1194,7 @@
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
error:
device->parentdev = NULL;
+error_return:
return status;
}
@@ -740,6 +1257,12 @@
if (status)
goto error_clk_off;
+ status = adreno_ocmem_gmem_malloc(adreno_dev);
+ if (status) {
+ KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
+ goto error_mmu_off;
+ }
+
/* Start the GPU */
adreno_dev->gpudev->start(adreno_dev);
@@ -756,7 +1279,10 @@
}
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+error_mmu_off:
kgsl_mmu_stop(&device->mmu);
+
error_clk_off:
kgsl_pwrctrl_disable(device);
@@ -777,6 +1303,8 @@
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
del_timer_sync(&device->idle_timer);
+ adreno_ocmem_gmem_free(adreno_dev);
+
/* Power down the device */
kgsl_pwrctrl_disable(device);
@@ -1590,9 +2118,8 @@
cmds[1] = 0;
if (adreno_dev->drawctxt_active)
- adreno_ringbuffer_issuecmds(device,
- adreno_dev->drawctxt_active,
- KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+ adreno_ringbuffer_issuecmds_intr(device,
+ context, &cmds[0], 2);
else
/* We would never call this function if there
	 * were no active contexts running */
@@ -1927,12 +2454,6 @@
.setproperty = adreno_setproperty,
};
-static struct platform_device_id adreno_id_table[] = {
- { DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
- { },
-};
-MODULE_DEVICE_TABLE(platform, adreno_id_table);
-
static struct platform_driver adreno_platform_driver = {
.probe = adreno_probe,
.remove = __devexit_p(adreno_remove),
@@ -1943,6 +2464,7 @@
.owner = THIS_MODULE,
.name = DEVICE_3D_NAME,
.pm = &kgsl_pm_ops,
+ .of_match_table = adreno_match_table,
}
};
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 57f4859..279e7ed 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -17,6 +17,7 @@
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
#include "kgsl_iommu.h"
+#include <mach/ocmem.h>
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"
@@ -32,7 +33,7 @@
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
-#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
+#define KGSL_CMD_FLAGS_DUMMY_INTR_CMD 0x00000002
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
@@ -61,6 +62,7 @@
ADRENO_REV_A225 = 225,
ADRENO_REV_A305 = 305,
ADRENO_REV_A320 = 320,
+ ADRENO_REV_A330 = 330,
};
struct adreno_gpudev;
@@ -87,6 +89,7 @@
unsigned int instruction_size;
unsigned int ib_check_level;
unsigned int fast_hang_detect;
+ struct ocmem_buf *ocmem_hdl;
};
struct adreno_gpudev {
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index bb89067..2dbfd8f 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2701,24 +2701,46 @@
struct kgsl_device *device = &adreno_dev->dev;
/* Set up 16 deep read/write request queues */
+ if (adreno_dev->gpurev == ADRENO_REV_A330) {
+ adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x00001818);
+ adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00001818);
+ adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00001818);
+ adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x00001818);
+ /* Enable WR-REQ */
+ adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
- adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
- adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
- adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
- adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
- adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303);
- adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
- adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Set up round robin arbitration between both AXI ports */
+ adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+ adreno_regwrite(device, A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
- /* Enable WR-REQ */
- adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x000000FF);
+ /* Set up AOOO */
+ adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x00000FFF);
+ adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x0FFF0FFF);
- /* Set up round robin arbitration between both AXI ports */
- adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* VBIF AXI AMEMTYPE CONFIG */
+ adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0,
+ 0x22222222);
+ } else {
+ adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ */
+ adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
- /* Set up AOOO */
- adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
- adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+ /* Set up round robin arbitration between both AXI ports */
+ adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO */
+ adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
+ adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+ }
if (cpu_is_apq8064()) {
/* Enable 1K sort */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 098c4f5..6c74dfa 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -147,6 +147,7 @@
{
struct adreno_context *drawctxt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
@@ -157,6 +158,7 @@
drawctxt->pagetable = pagetable;
drawctxt->bin_base_offset = 0;
drawctxt->id = context->id;
+ rb->timestamp[context->id] = 0;
if (flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
@@ -174,6 +176,12 @@
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
KGSL_INIT_REFTIMESTAMP);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->id, ts_cmp_enable), 0);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->id, soptimestamp), 0);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->id, eoptimestamp), 0);
context->devctxt = drawctxt;
return 0;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 86a349a..49786ba 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -404,11 +404,8 @@
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
- if (rb->flags & KGSL_FLAGS_STARTED) {
- /* ME_HALT */
- adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
+ if (rb->flags & KGSL_FLAGS_STARTED)
rb->flags &= ~KGSL_FLAGS_STARTED;
- }
}
int adreno_ringbuffer_init(struct kgsl_device *device)
@@ -494,9 +491,9 @@
* error checking if needed
*/
total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
- total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
/* 2 dwords to store the start of command sequence */
total_sizedwords += 2;
+ total_sizedwords += context ? 7 : 0;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -548,9 +545,10 @@
/* always increment the global timestamp. once. */
rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
- if (context) {
+
+ if (context && !(flags & KGSL_CMD_FLAGS_DUMMY_INTR_CMD)) {
if (context_id == KGSL_MEMSTORE_GLOBAL)
- rb->timestamp[context_id] =
+ rb->timestamp[context->id] =
rb->timestamp[KGSL_MEMSTORE_GLOBAL];
else
rb->timestamp[context_id]++;
@@ -580,7 +578,7 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
+ KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
/* end-of-pipeline timestamp */
@@ -588,14 +586,14 @@
cp_type3_packet(CP_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
+ KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)));
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu,
rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
} else {
@@ -603,13 +601,11 @@
cp_type3_packet(CP_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)));
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+ KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[context_id]);
}
- if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
+ if (context) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_COND_EXEC, 4));
@@ -641,6 +637,30 @@
return timestamp;
}
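+/*
+ * Submit a small command stream for a context from the timestamp
+ * interrupt path.  KGSL_CMD_FLAGS_DUMMY_INTR_CMD keeps addcmds from
+ * incrementing the per-context timestamp for this submission.
+ */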
+void
+adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
+ struct kgsl_context *k_ctxt,
+ unsigned int *cmds,
+ int sizedwords)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct adreno_context *a_ctxt = NULL;
+
+ if (!k_ctxt)
+ return;
+
+ a_ctxt = k_ctxt->devctxt;
+
+ if (k_ctxt->id == KGSL_CONTEXT_INVALID ||
+ a_ctxt == NULL ||
+ device->state & KGSL_STATE_HUNG)
+ return;
+
+ adreno_ringbuffer_addcmds(rb, a_ctxt, KGSL_CMD_FLAGS_DUMMY_INTR_CMD,
+ cmds, sizedwords);
+}
+
unsigned int
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
struct adreno_context *drawctxt,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 4cc57c2..6c3d9b1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -110,6 +110,11 @@
unsigned int *cmdaddr,
int sizedwords);
+void adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
+ struct kgsl_context *k_ctxt,
+ unsigned int *cmdaddr,
+ int sizedwords);
+
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
void kgsl_cp_intrcallback(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index edccff1..d8472f2 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -686,7 +686,6 @@
static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
- kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
mmu->flags &= ~KGSL_FLAGS_STARTED;
}
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 016771b..e858651 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -880,7 +880,6 @@
*/
if (mmu->flags & KGSL_FLAGS_STARTED) {
- kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
/* detach iommu attachment */
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->hwpagetable = NULL;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index bfe6957..6d4d4d3 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -439,8 +439,8 @@
if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
trace_kgsl_rail(device, state);
- if (pwr->gpu_dig)
- regulator_disable(pwr->gpu_dig);
+ if (pwr->gpu_cx)
+ regulator_disable(pwr->gpu_cx);
if (pwr->gpu_reg)
regulator_disable(pwr->gpu_reg);
}
@@ -456,8 +456,8 @@
"failed: %d\n",
status);
}
- if (pwr->gpu_dig) {
- int status = regulator_enable(pwr->gpu_dig);
+ if (pwr->gpu_cx) {
+ int status = regulator_enable(pwr->gpu_cx);
if (status)
KGSL_DRV_ERR(device,
"cx regulator_enable "
@@ -547,11 +547,11 @@
pwr->gpu_reg = NULL;
if (pwr->gpu_reg) {
- pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
- if (IS_ERR(pwr->gpu_dig))
- pwr->gpu_dig = NULL;
+ pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
+ if (IS_ERR(pwr->gpu_cx))
+ pwr->gpu_cx = NULL;
} else
- pwr->gpu_dig = NULL;
+ pwr->gpu_cx = NULL;
pwr->power_flags = 0;
@@ -615,9 +615,9 @@
pwr->gpu_reg = NULL;
}
- if (pwr->gpu_dig) {
- regulator_put(pwr->gpu_dig);
- pwr->gpu_dig = NULL;
+ if (pwr->gpu_cx) {
+ regulator_put(pwr->gpu_cx);
+ pwr->gpu_cx = NULL;
}
for (i = 1; i < KGSL_MAX_CLKS; i++)
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 954c818..cd44152 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -50,7 +50,7 @@
unsigned int interval_timeout;
bool strtstp_sleepwake;
struct regulator *gpu_reg;
- struct regulator *gpu_dig;
+ struct regulator *gpu_cx;
uint32_t pcl;
unsigned int nap_allowed;
unsigned int idle_needed;
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
index baa0407..c6f8b1b 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_msm.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -17,6 +17,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#include "a2xx_reg.h"
+#include "kgsl_trace.h"
struct msm_priv {
struct kgsl_device *device;
@@ -92,6 +93,7 @@
struct msm_priv *priv = pwrscale->priv;
if (priv->enabled && !priv->gpu_busy) {
msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_EXIT, 0);
+ trace_kgsl_mpdcvs(device, 1);
priv->gpu_busy = 1;
}
return;
@@ -105,6 +107,7 @@
if (priv->enabled && priv->gpu_busy)
if (device->ftbl->isidle(device)) {
msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+ trace_kgsl_mpdcvs(device, 0);
priv->gpu_busy = 0;
}
return;
@@ -117,6 +120,7 @@
if (priv->enabled && priv->gpu_busy) {
msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+ trace_kgsl_mpdcvs(device, 0);
priv->gpu_busy = 0;
}
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 3eff40f..81ab3fb 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -251,6 +251,29 @@
)
);
+TRACE_EVENT(kgsl_mpdcvs,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+
+ TP_ARGS(device, state),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->state = state;
+ ),
+
+ TP_printk(
+ "d_name=%s %s",
+ __get_str(device_name),
+ __entry->state ? "BUSY" : "IDLE"
+ )
+);
+
DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
TP_PROTO(struct kgsl_device *device, unsigned int state),
diff --git a/drivers/media/video/msm/msm.c b/drivers/media/video/msm/msm.c
index b14d4f6..e5c1091 100644
--- a/drivers/media/video/msm/msm.c
+++ b/drivers/media/video/msm/msm.c
@@ -189,12 +189,25 @@
{
int rc = 0, i, j;
struct msm_cam_v4l2_dev_inst *pcam_inst;
+ struct msm_cam_media_controller *pmctl;
+ struct msm_cam_v4l2_device *pcam = video_drvdata(f);
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
mutex_lock(&pcam_inst->inst_lock);
+ if (!pcam_inst->vbqueue_initialized && pb->count) {
+ pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (pmctl == NULL) {
+			pr_err("%s Invalid mctl ptr", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
+			return -EINVAL;
+ }
+ pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
+ pb->type);
+ pcam_inst->vbqueue_initialized = 1;
+ }
+
rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
if (rc < 0) {
pr_err("%s reqbufs failed %d ", __func__, rc);
@@ -564,7 +577,6 @@
int rc;
/* get the video device */
struct msm_cam_v4l2_device *pcam = video_drvdata(f);
- struct msm_cam_media_controller *pmctl;
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
@@ -575,16 +587,6 @@
(void *)pfmt->fmt.pix.priv);
WARN_ON(pctx != f->private_data);
- pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
- if (pmctl == NULL)
- return -EINVAL;
-
- if (!pcam_inst->vbqueue_initialized) {
- pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- pcam_inst->vbqueue_initialized = 1;
- }
-
mutex_lock(&pcam->vid_lock);
rc = msm_server_set_fmt(pcam, pcam_inst->my_index, pfmt);
@@ -602,7 +604,6 @@
{
int rc;
struct msm_cam_v4l2_device *pcam = video_drvdata(f);
- struct msm_cam_media_controller *pmctl;
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
@@ -610,16 +611,6 @@
D("%s Inst %p\n", __func__, pcam_inst);
WARN_ON(pctx != f->private_data);
- pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
- if (pmctl == NULL)
- return -EINVAL;
-
- if (!pcam_inst->vbqueue_initialized) {
- pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- pcam_inst->vbqueue_initialized = 1;
- }
-
mutex_lock(&pcam->vid_lock);
rc = msm_server_set_fmt_mplane(pcam, pcam_inst->my_index, pfmt);
mutex_unlock(&pcam->vid_lock);
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index a87b074..0bb9a5c 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -130,6 +130,14 @@
.pxlcode = V4L2_MBUS_FMT_SGRBG10_1X10, /* Bayer sensor */
.colorspace = V4L2_COLORSPACE_JPEG,
},
+ {
+ .name = "YUYV",
+ .depth = 16,
+ .bitsperpxl = 16,
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .pxlcode = V4L2_MBUS_FMT_YUYV8_2X8, /* YUV sensor */
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ },
};
@@ -988,12 +996,24 @@
{
int rc = 0, i, j;
struct msm_cam_v4l2_dev_inst *pcam_inst;
+ struct msm_cam_media_controller *pmctl;
+ struct msm_cam_v4l2_device *pcam = video_drvdata(f);
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
WARN_ON(pctx != f->private_data);
mutex_lock(&pcam_inst->inst_lock);
+ if (!pcam_inst->vbqueue_initialized && pb->count) {
+ pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
+ if (pmctl == NULL) {
+			pr_err("%s Invalid mctl ptr", __func__);
+			mutex_unlock(&pcam_inst->inst_lock);
+			return -EINVAL;
+ }
+ pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
+ pb->type);
+ pcam_inst->vbqueue_initialized = 1;
+ }
rc = vb2_reqbufs(&pcam_inst->vid_bufq, pb);
if (rc < 0) {
pr_err("%s reqbufs failed %d ", __func__, rc);
@@ -1306,30 +1326,10 @@
struct v4l2_format *pfmt)
{
int rc = 0;
- /* get the video device */
- struct msm_cam_v4l2_device *pcam = video_drvdata(f);
- struct msm_cam_media_controller *pmctl;
- struct msm_cam_v4l2_dev_inst *pcam_inst;
- pcam_inst = container_of(f->private_data,
- struct msm_cam_v4l2_dev_inst, eventHandle);
D("%s\n", __func__);
- D("%s, inst=0x%x,idx=%d,priv = 0x%p\n",
- __func__, (u32)pcam_inst, pcam_inst->my_index,
- (void *)pfmt->fmt.pix.priv);
WARN_ON(pctx != f->private_data);
- pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
- if (!pmctl) {
- pr_err("%s mctl ptr is null ", __func__);
- return -EINVAL;
- }
- if (!pcam_inst->vbqueue_initialized) {
- pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- pcam_inst->vbqueue_initialized = 1;
- }
-
return rc;
}
@@ -1338,25 +1338,13 @@
{
int rc = 0, i;
struct msm_cam_v4l2_device *pcam = video_drvdata(f);
- struct msm_cam_media_controller *pmctl;
struct msm_cam_v4l2_dev_inst *pcam_inst;
pcam_inst = container_of(f->private_data,
struct msm_cam_v4l2_dev_inst, eventHandle);
- D("%s Inst %p vbqueue %d\n", __func__,
- pcam_inst, pcam_inst->vbqueue_initialized);
+ D("%s Inst %p\n", __func__, pcam_inst);
WARN_ON(pctx != f->private_data);
- pmctl = msm_cam_server_get_mctl(pcam->mctl_handle);
- if (!pmctl) {
- pr_err("%s mctl ptr is null ", __func__);
- return -EINVAL;
- }
- if (!pcam_inst->vbqueue_initialized) {
- pmctl->mctl_vbqueue_init(pcam_inst, &pcam_inst->vid_bufq,
- V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- pcam_inst->vbqueue_initialized = 1;
- }
for (i = 0; i < pcam->num_fmts; i++)
if (pcam->usr_fmts[i].fourcc == pfmt->fmt.pix_mp.pixelformat)
break;
diff --git a/drivers/media/video/msm/vfe/Makefile b/drivers/media/video/msm/vfe/Makefile
index 8068e4f..91f0e7f 100644
--- a/drivers/media/video/msm/vfe/Makefile
+++ b/drivers/media/video/msm/vfe/Makefile
@@ -16,4 +16,5 @@
obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o
+obj-$(CONFIG_ARCH_MSM8974) += msm_vfe40.o msm_vfe40_axi.o
obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_vfe_stats_buf.o
diff --git a/drivers/media/video/msm/vfe/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
index 28b88dd..3e01437 100644
--- a/drivers/media/video/msm/vfe/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -1133,8 +1133,6 @@
struct msm_cam_media_controller *pmctl,
struct vfe32_ctrl_type *vfe32_ctrl)
{
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_START_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -1148,8 +1146,6 @@
vfe32_ctrl->share_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
return 0;
}
@@ -1162,8 +1158,6 @@
vfe32_ctrl->share_ctrl->vfe_capture_count =
vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt;
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
msm_camera_io_w_mb(1, vfe32_ctrl->
share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
@@ -1176,8 +1170,6 @@
vfe32_ctrl->share_ctrl->liveshot_state = VFE_STATE_STOP_REQUESTED;
msm_camera_io_w_mb(1,
vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
}
static int vfe32_zsl(
@@ -1185,8 +1177,6 @@
struct vfe32_ctrl_type *vfe32_ctrl)
{
vfe32_start_common(vfe32_ctrl);
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x18C);
msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x188);
@@ -1199,8 +1189,6 @@
{
vfe32_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
vfe32_start_common(vfe32_ctrl);
return 0;
}
@@ -1228,9 +1216,6 @@
vfe32_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
-
vfe32_start_common(vfe32_ctrl);
/* for debug */
msm_camera_io_w(1, vfe32_ctrl->share_ctrl->vfebase + 0x18C);
@@ -1242,8 +1227,6 @@
struct msm_cam_media_controller *pmctl,
struct vfe32_ctrl_type *vfe32_ctrl)
{
- msm_camio_bus_scale_cfg(
- pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
vfe32_start_common(vfe32_ctrl);
return 0;
}
@@ -1478,53 +1461,53 @@
b = &outch->free_buf;
return b;
}
-static int vfe32_configure_pingpong_buffers(
- int id, int path, struct vfe32_ctrl_type *vfe32_ctrl)
+static int configure_pingpong_buffers(
+ int id, int path, struct axi_ctrl_t *axi_ctrl)
{
struct vfe32_output_ch *outch = NULL;
int rc = 0;
uint32_t inst_handle = 0;
if (path == VFE_MSG_OUTPUT_PRIMARY)
- inst_handle = vfe32_ctrl->share_ctrl->outpath.out0.inst_handle;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out0.inst_handle;
else if (path == VFE_MSG_OUTPUT_SECONDARY)
- inst_handle = vfe32_ctrl->share_ctrl->outpath.out1.inst_handle;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out1.inst_handle;
else if (path == VFE_MSG_OUTPUT_TERTIARY1)
- inst_handle = vfe32_ctrl->share_ctrl->outpath.out2.inst_handle;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out2.inst_handle;
else if (path == VFE_MSG_OUTPUT_TERTIARY2)
- inst_handle = vfe32_ctrl->share_ctrl->outpath.out3.inst_handle;
+ inst_handle = axi_ctrl->share_ctrl->outpath.out3.inst_handle;
vfe32_subdev_notify(id, path, inst_handle,
- &vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
- outch = vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+ &axi_ctrl->subdev, axi_ctrl->share_ctrl);
+ outch = vfe32_get_ch(path, axi_ctrl->share_ctrl);
if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
/* Configure Preview Ping Pong */
pr_info("%s Configure ping/pong address for %d",
__func__, path);
vfe32_put_ch_ping_addr(
- vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
+ axi_ctrl->share_ctrl->vfebase, outch->ch0,
outch->ping.ch_paddr[0]);
vfe32_put_ch_pong_addr(
- vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
+ axi_ctrl->share_ctrl->vfebase, outch->ch0,
outch->pong.ch_paddr[0]);
- if ((vfe32_ctrl->share_ctrl->current_mode !=
+ if ((axi_ctrl->share_ctrl->current_mode !=
VFE_OUTPUTS_RAW) && (path != VFE_MSG_OUTPUT_TERTIARY1)
&& (path != VFE_MSG_OUTPUT_TERTIARY2)) {
vfe32_put_ch_ping_addr(
- vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
+ axi_ctrl->share_ctrl->vfebase, outch->ch1,
outch->ping.ch_paddr[1]);
vfe32_put_ch_pong_addr(
- vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
+ axi_ctrl->share_ctrl->vfebase, outch->ch1,
outch->pong.ch_paddr[1]);
}
if (outch->ping.num_planes > 2)
vfe32_put_ch_ping_addr(
- vfe32_ctrl->share_ctrl->vfebase, outch->ch2,
+ axi_ctrl->share_ctrl->vfebase, outch->ch2,
outch->ping.ch_paddr[2]);
if (outch->pong.num_planes > 2)
vfe32_put_ch_pong_addr(
- vfe32_ctrl->share_ctrl->vfebase, outch->ch2,
+ axi_ctrl->share_ctrl->vfebase, outch->ch2,
outch->pong.ch_paddr[2]);
/* avoid stale info */
@@ -1579,7 +1562,6 @@
uint32_t *cmdp_local = NULL;
uint32_t snapshot_cnt = 0;
uint32_t temp1 = 0, temp2 = 0;
- uint16_t vfe_mode = 0;
struct msm_camera_vfe_params_t vfe_params;
CDBG("vfe32_proc_general: cmdID = %s, length = %d\n",
@@ -1602,42 +1584,7 @@
vfe32_ctrl->share_ctrl->current_mode =
vfe_params.operation_mode;
- vfe_mode = vfe32_ctrl->share_ctrl->current_mode
- & ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
- if (vfe_mode) {
- if ((vfe32_ctrl->share_ctrl->current_mode &
- VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
- (vfe32_ctrl->share_ctrl->current_mode &
- VFE_OUTPUTS_PREVIEW))
- /* Configure primary channel */
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_START,
- VFE_MSG_OUTPUT_PRIMARY,
- vfe32_ctrl);
- else
- /* Configure secondary channel */
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_START,
- VFE_MSG_OUTPUT_SECONDARY,
- vfe32_ctrl);
- }
- if (vfe32_ctrl->share_ctrl->current_mode &
- VFE_OUTPUTS_RDI0)
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
- vfe32_ctrl);
- if (vfe32_ctrl->share_ctrl->current_mode &
- VFE_OUTPUTS_RDI1)
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
- vfe32_ctrl);
- if (rc < 0) {
- pr_err("%s error configuring pingpong buffers"
- " for preview", __func__);
- rc = -EINVAL;
- goto proc_general_done;
- }
rc = vfe32_start(pmctl, vfe32_ctrl);
break;
case VFE_CMD_UPDATE:
@@ -1655,15 +1602,6 @@
snapshot_cnt = vfe_params.capture_count;
vfe32_ctrl->share_ctrl->current_mode =
vfe_params.operation_mode;
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
- vfe32_ctrl);
- if (rc < 0) {
- pr_err("%s error configuring pingpong buffers"
- " for snapshot", __func__);
- rc = -EINVAL;
- goto proc_general_done;
- }
rc = vfe32_capture_raw(pmctl, vfe32_ctrl, snapshot_cnt);
break;
case VFE_CMD_CAPTURE:
@@ -1677,78 +1615,12 @@
snapshot_cnt = vfe_params.capture_count;
vfe32_ctrl->share_ctrl->current_mode =
vfe_params.operation_mode;
- if (vfe32_ctrl->share_ctrl->current_mode ==
- VFE_OUTPUTS_JPEG_AND_THUMB ||
- vfe32_ctrl->share_ctrl->current_mode ==
- VFE_OUTPUTS_THUMB_AND_JPEG) {
- if (snapshot_cnt != 1) {
- pr_err("only support 1 inline snapshot\n");
- rc = -EINVAL;
- goto proc_general_done;
- }
- /* Configure primary channel for JPEG */
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_JPEG_CAPTURE,
- VFE_MSG_OUTPUT_PRIMARY,
- vfe32_ctrl);
- } else {
- /* Configure primary channel */
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_CAPTURE,
- VFE_MSG_OUTPUT_PRIMARY,
- vfe32_ctrl);
- }
- if (rc < 0) {
- pr_err("%s error configuring pingpong buffers"
- " for primary output", __func__);
- rc = -EINVAL;
- goto proc_general_done;
- }
- /* Configure secondary channel */
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
- vfe32_ctrl);
- if (rc < 0) {
- pr_err("%s error configuring pingpong buffers"
- " for secondary output", __func__);
- rc = -EINVAL;
- goto proc_general_done;
- }
+
rc = vfe32_capture(pmctl, snapshot_cnt, vfe32_ctrl);
break;
case VFE_CMD_START_RECORDING:
pr_info("vfe32_proc_general: cmdID = %s\n",
vfe32_general_cmd[cmd->id]);
- if (copy_from_user(&temp1, (void __user *)(cmd->value),
- sizeof(uint32_t))) {
- pr_err("%s Error copying inst_handle for recording\n",
- __func__);
- rc = -EFAULT;
- goto proc_general_done;
- }
- if (vfe32_ctrl->share_ctrl->current_mode &
- VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
- vfe32_ctrl->share_ctrl->outpath.out1.inst_handle =
- temp1;
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_START_RECORDING,
- VFE_MSG_OUTPUT_SECONDARY,
- vfe32_ctrl);
- } else if (vfe32_ctrl->share_ctrl->current_mode &
- VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
- vfe32_ctrl->share_ctrl->outpath.out0.inst_handle =
- temp1;
- rc = vfe32_configure_pingpong_buffers(
- VFE_MSG_START_RECORDING,
- VFE_MSG_OUTPUT_PRIMARY,
- vfe32_ctrl);
- }
- if (rc < 0) {
- pr_err("%s error configuring pingpong buffers"
- " for video", __func__);
- rc = -EINVAL;
- goto proc_general_done;
- }
rc = vfe32_start_recording(pmctl, vfe32_ctrl);
break;
case VFE_CMD_STOP_RECORDING:
@@ -2290,23 +2162,7 @@
break;
case VFE_CMD_LIVESHOT:
- if (copy_from_user(&temp1, (void __user *)(cmd->value),
- sizeof(uint32_t))) {
- pr_err("%s Error copying inst_handle for liveshot ",
- __func__);
- rc = -EFAULT;
- goto proc_general_done;
- }
- vfe32_ctrl->share_ctrl->outpath.out0.inst_handle = temp1;
/* Configure primary channel */
- rc = vfe32_configure_pingpong_buffers(VFE_MSG_CAPTURE,
- VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
- if (rc < 0) {
- pr_err("%s error configuring pingpong buffers"
- " for primary output", __func__);
- rc = -EINVAL;
- goto proc_general_done;
- }
vfe32_start_liveshot(pmctl, vfe32_ctrl);
break;
@@ -2848,14 +2704,6 @@
vfe32_ctrl->share_ctrl->current_mode =
vfe_params.operation_mode;
- rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
- VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
- if (rc < 0)
- goto proc_general_done;
- rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
- VFE_MSG_OUTPUT_SECONDARY, vfe32_ctrl);
- if (rc < 0)
- goto proc_general_done;
rc = vfe32_zsl(pmctl, vfe32_ctrl);
break;
@@ -5171,9 +5019,185 @@
axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
}
-void axi_start(struct axi_ctrl_t *axi_ctrl, uint16_t cmd_type)
+int axi_config_buffers(struct axi_ctrl_t *axi_ctrl,
+ struct msm_camera_vfe_params_t vfe_params)
+{
+ uint16_t vfe_mode = axi_ctrl->share_ctrl->current_mode
+ & ~(VFE_OUTPUTS_RDI0|VFE_OUTPUTS_RDI1);
+ int rc = 0;
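+	/* Pick which ping/pong buffers to configure based on the AXI use case */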
+ switch (vfe_params.cmd_type) {
+ case AXI_CMD_PREVIEW:
+ if (vfe_mode) {
+ if ((axi_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
+ (axi_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_PREVIEW))
+ /* Configure primary channel */
+ rc = configure_pingpong_buffers(
+ VFE_MSG_START,
+ VFE_MSG_OUTPUT_PRIMARY,
+ axi_ctrl);
+ else
+ /* Configure secondary channel */
+ rc = configure_pingpong_buffers(
+ VFE_MSG_START,
+ VFE_MSG_OUTPUT_SECONDARY,
+ axi_ctrl);
+ }
+ if (axi_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_RDI0)
+ rc = configure_pingpong_buffers(
+ VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
+ axi_ctrl);
+ if (axi_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_RDI1)
+ rc = configure_pingpong_buffers(
+ VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
+ axi_ctrl);
+
+ if (rc < 0) {
+ pr_err("%s error configuring pingpong buffers for preview",
+ __func__);
+ rc = -EINVAL;
+ goto config_done;
+ }
+ break;
+ case AXI_CMD_RAW_CAPTURE:
+ rc = configure_pingpong_buffers(
+ VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+ axi_ctrl);
+ if (rc < 0) {
+ pr_err("%s error configuring pingpong buffers for snapshot",
+ __func__);
+ rc = -EINVAL;
+ goto config_done;
+ }
+ break;
+ case AXI_CMD_ZSL:
+ rc = configure_pingpong_buffers(VFE_MSG_START,
+ VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+ if (rc < 0)
+ goto config_done;
+ rc = configure_pingpong_buffers(VFE_MSG_START,
+ VFE_MSG_OUTPUT_SECONDARY, axi_ctrl);
+ if (rc < 0)
+ goto config_done;
+ break;
+ case AXI_CMD_RECORD:
+ if (axi_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+ axi_ctrl->share_ctrl->outpath.out1.inst_handle =
+ vfe_params.inst_handle;
+ rc = configure_pingpong_buffers(
+ VFE_MSG_START_RECORDING,
+ VFE_MSG_OUTPUT_SECONDARY,
+ axi_ctrl);
+ } else if (axi_ctrl->share_ctrl->current_mode &
+ VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+ axi_ctrl->share_ctrl->outpath.out0.inst_handle =
+ vfe_params.inst_handle;
+ rc = configure_pingpong_buffers(
+ VFE_MSG_START_RECORDING,
+ VFE_MSG_OUTPUT_PRIMARY,
+ axi_ctrl);
+ }
+ if (rc < 0) {
+ pr_err("%s error configuring pingpong buffers for video",
+ __func__);
+ rc = -EINVAL;
+ goto config_done;
+ }
+ break;
+ case AXI_CMD_LIVESHOT:
+ axi_ctrl->share_ctrl->outpath.out0.inst_handle =
+ vfe_params.inst_handle;
+ rc = configure_pingpong_buffers(VFE_MSG_CAPTURE,
+ VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+ if (rc < 0) {
+ pr_err("%s error configuring pingpong buffers for primary output",
+ __func__);
+ rc = -EINVAL;
+ goto config_done;
+ }
+ break;
+ case AXI_CMD_CAPTURE:
+ if (axi_ctrl->share_ctrl->current_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ axi_ctrl->share_ctrl->current_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG) {
+
+ /* Configure primary channel for JPEG */
+ rc = configure_pingpong_buffers(
+ VFE_MSG_JPEG_CAPTURE,
+ VFE_MSG_OUTPUT_PRIMARY,
+ axi_ctrl);
+ } else {
+ /* Configure primary channel */
+ rc = configure_pingpong_buffers(
+ VFE_MSG_CAPTURE,
+ VFE_MSG_OUTPUT_PRIMARY,
+ axi_ctrl);
+ }
+ if (rc < 0) {
+ pr_err("%s error configuring pingpong buffers for primary output",
+ __func__);
+ rc = -EINVAL;
+ goto config_done;
+ }
+ /* Configure secondary channel */
+ rc = configure_pingpong_buffers(
+ VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+ axi_ctrl);
+ if (rc < 0) {
+ pr_err("%s error configuring pingpong buffers for secondary output",
+ __func__);
+ rc = -EINVAL;
+ goto config_done;
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+
+ }
+config_done:
+ return rc;
+}
+
+void axi_start(struct msm_cam_media_controller *pmctl,
+ struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
{
uint32_t irq_comp_mask = 0, irq_mask = 0;
+ int rc = 0;
+ rc = axi_config_buffers(axi_ctrl, vfe_params);
+ if (rc < 0)
+ return;
+
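+	/*
+	 * Vote camera bus bandwidth for the requested use case.  RECORD and
+	 * LIVESHOT only update the vote and return without reprogramming the
+	 * IRQ masks below.
+	 */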
+ switch (vfe_params.cmd_type) {
+ case AXI_CMD_PREVIEW:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+ break;
+ case AXI_CMD_CAPTURE:
+ case AXI_CMD_RAW_CAPTURE:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+ break;
+ case AXI_CMD_RECORD:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+ return;
+ case AXI_CMD_ZSL:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
+ break;
+ case AXI_CMD_LIVESHOT:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_LIVESHOT);
+ return;
+ default:
+ return;
+ }
irq_comp_mask =
msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
@@ -5225,7 +5249,7 @@
msm_camera_io_w(irq_comp_mask,
axi_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
- switch (cmd_type) {
+ switch (vfe_params.cmd_type) {
case AXI_CMD_PREVIEW: {
uint16_t operation_mode =
(axi_ctrl->share_ctrl->operation_mode &
@@ -5335,7 +5359,8 @@
atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 1);
}
-void axi_stop(struct axi_ctrl_t *axi_ctrl, uint16_t cmd_type)
+void axi_stop(struct msm_cam_media_controller *pmctl,
+ struct axi_ctrl_t *axi_ctrl, struct msm_camera_vfe_params_t vfe_params)
{
uint32_t reg_update = 0;
unsigned long flags;
@@ -5343,6 +5368,24 @@
axi_ctrl->share_ctrl->current_mode & ~(VFE_OUTPUTS_RDI0|
VFE_OUTPUTS_RDI1);
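+	/*
+	 * Stopping RECORD or LIVESHOT only drops the bus vote back to the
+	 * preview/video level and returns; PREVIEW, CAPTURE, RAW_CAPTURE and
+	 * ZSL continue to the full AXI stop sequence below.
+	 */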
+ switch (vfe_params.cmd_type) {
+ case AXI_CMD_PREVIEW:
+ case AXI_CMD_CAPTURE:
+ case AXI_CMD_RAW_CAPTURE:
+ case AXI_CMD_ZSL:
+ break;
+ case AXI_CMD_RECORD:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+ return;
+ case AXI_CMD_LIVESHOT:
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+ return;
+ default:
+ return;
+ }
+
if (!axi_ctrl->share_ctrl->skip_abort) {
atomic_set(&axi_ctrl->share_ctrl->handle_axi_irq, 0);
axi_disable_irq(axi_ctrl);
@@ -5351,7 +5394,7 @@
spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
axi_ctrl->share_ctrl->stop_ack_pending = TRUE;
spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
- switch (cmd_type) {
+ switch (vfe_params.cmd_type) {
case AXI_CMD_PREVIEW: {
switch (operation_mode) {
case VFE_OUTPUTS_PREVIEW:
@@ -5474,6 +5517,8 @@
struct msm_vfe_cfg_cmd cfgcmd;
struct msm_isp_cmd vfecmd;
struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+ struct msm_cam_media_controller *pmctl =
+ (struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
int rc = 0, vfe_cmd_type = 0, rdi_mode = 0;
unsigned long flags;
@@ -5676,7 +5721,7 @@
vfe_params.skip_abort;
spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
flags);
- axi_start(axi_ctrl, vfe_params.cmd_type);
+ axi_start(pmctl, axi_ctrl, vfe_params);
}
break;
case CMD_AXI_STOP: {
@@ -5693,7 +5738,7 @@
vfe_params.skip_abort;
spin_unlock_irqrestore(&axi_ctrl->share_ctrl->abort_lock,
flags);
- axi_stop(axi_ctrl, vfe_params.cmd_type);
+ axi_stop(pmctl, axi_ctrl, vfe_params);
}
break;
case CMD_AXI_RESET:
diff --git a/drivers/media/video/msm/vfe/msm_vfe40.c b/drivers/media/video/msm/vfe/msm_vfe40.c
new file mode 100644
index 0000000..5a1d488
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40.c
@@ -0,0 +1,3699 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_vfe40.h"
+
+struct vfe40_isr_queue_cmd {
+ struct list_head list;
+ uint32_t vfeInterruptStatus0;
+ uint32_t vfeInterruptStatus1;
+};
+
+static const char * const vfe40_general_cmd[] = {
+ "DUMMY_0", /* 0 */
+ "SET_CLK",
+ "RESET",
+ "START",
+ "TEST_GEN_START",
+ "OPERATION_CFG", /* 5 */
+ "AXI_OUT_CFG",
+ "CAMIF_CFG",
+ "AXI_INPUT_CFG",
+ "BLACK_LEVEL_CFG",
+ "ROLL_OFF_CFG", /* 10 */
+ "DEMUX_CFG",
+ "FOV_CFG",
+ "MAIN_SCALER_CFG",
+ "WB_CFG",
+ "COLOR_COR_CFG", /* 15 */
+ "RGB_G_CFG",
+ "LA_CFG",
+ "CHROMA_EN_CFG",
+ "CHROMA_SUP_CFG",
+ "MCE_CFG", /* 20 */
+ "SK_ENHAN_CFG",
+ "ASF_CFG",
+ "S2Y_CFG",
+ "S2CbCr_CFG",
+ "CHROMA_SUBS_CFG", /* 25 */
+ "OUT_CLAMP_CFG",
+ "FRAME_SKIP_CFG",
+ "DUMMY_1",
+ "DUMMY_2",
+ "DUMMY_3", /* 30 */
+ "UPDATE",
+ "BL_LVL_UPDATE",
+ "DEMUX_UPDATE",
+ "FOV_UPDATE",
+ "MAIN_SCALER_UPDATE", /* 35 */
+ "WB_UPDATE",
+ "COLOR_COR_UPDATE",
+ "RGB_G_UPDATE",
+ "LA_UPDATE",
+ "CHROMA_EN_UPDATE", /* 40 */
+ "CHROMA_SUP_UPDATE",
+ "MCE_UPDATE",
+ "SK_ENHAN_UPDATE",
+ "S2CbCr_UPDATE",
+ "S2Y_UPDATE", /* 45 */
+ "ASF_UPDATE",
+ "FRAME_SKIP_UPDATE",
+ "CAMIF_FRAME_UPDATE",
+ "STATS_AF_UPDATE",
+ "STATS_AE_UPDATE", /* 50 */
+ "STATS_AWB_UPDATE",
+ "STATS_RS_UPDATE",
+ "STATS_CS_UPDATE",
+ "STATS_SKIN_UPDATE",
+ "STATS_IHIST_UPDATE", /* 55 */
+ "DUMMY_4",
+ "EPOCH1_ACK",
+ "EPOCH2_ACK",
+ "START_RECORDING",
+ "STOP_RECORDING", /* 60 */
+ "DUMMY_5",
+ "DUMMY_6",
+ "CAPTURE",
+ "DUMMY_7",
+ "STOP", /* 65 */
+ "GET_HW_VERSION",
+ "GET_FRAME_SKIP_COUNTS",
+ "OUTPUT1_BUFFER_ENQ",
+ "OUTPUT2_BUFFER_ENQ",
+ "OUTPUT3_BUFFER_ENQ", /* 70 */
+ "JPEG_OUT_BUF_ENQ",
+ "RAW_OUT_BUF_ENQ",
+ "RAW_IN_BUF_ENQ",
+ "STATS_AF_ENQ",
+ "STATS_AE_ENQ", /* 75 */
+ "STATS_AWB_ENQ",
+ "STATS_RS_ENQ",
+ "STATS_CS_ENQ",
+ "STATS_SKIN_ENQ",
+ "STATS_IHIST_ENQ", /* 80 */
+ "DUMMY_8",
+ "JPEG_ENC_CFG",
+ "DUMMY_9",
+ "STATS_AF_START",
+ "STATS_AF_STOP", /* 85 */
+ "STATS_AE_START",
+ "STATS_AE_STOP",
+ "STATS_AWB_START",
+ "STATS_AWB_STOP",
+ "STATS_RS_START", /* 90 */
+ "STATS_RS_STOP",
+ "STATS_CS_START",
+ "STATS_CS_STOP",
+ "STATS_SKIN_START",
+ "STATS_SKIN_STOP", /* 95 */
+ "STATS_IHIST_START",
+ "STATS_IHIST_STOP",
+ "DUMMY_10",
+ "SYNC_TIMER_SETTING",
+ "ASYNC_TIMER_SETTING", /* 100 */
+ "LIVESHOT",
+ "LA_SETUP",
+ "LINEARIZATION_CFG",
+ "DEMOSAICV3",
+ "DEMOSAICV3_ABCC_CFG", /* 105 */
+ "DEMOSAICV3_DBCC_CFG",
+ "DEMOSAICV3_DBPC_CFG",
+ "DEMOSAICV3_ABF_CFG",
+ "DEMOSAICV3_ABCC_UPDATE",
+ "DEMOSAICV3_DBCC_UPDATE", /* 110 */
+ "DEMOSAICV3_DBPC_UPDATE",
+ "XBAR_CFG",
+ "EZTUNE_CFG",
+ "V40_ZSL",
+ "LINEARIZATION_UPDATE", /*115*/
+ "DEMOSAICV3_ABF_UPDATE",
+ "CLF_CFG",
+ "CLF_LUMA_UPDATE",
+ "CLF_CHROMA_UPDATE",
+ "PCA_ROLL_OFF_CFG", /*120*/
+ "PCA_ROLL_OFF_UPDATE",
+ "GET_REG_DUMP",
+ "GET_LINEARIZATON_TABLE",
+ "GET_MESH_ROLLOFF_TABLE",
+ "GET_PCA_ROLLOFF_TABLE", /*125*/
+ "GET_RGB_G_TABLE",
+ "GET_LA_TABLE",
+ "DEMOSAICV3_UPDATE",
+ "ACTIVE_REGION_CONFIG",
+ "COLOR_PROCESSING_CONFIG", /*130*/
+ "STATS_WB_AEC_CONFIG",
+ "STATS_WB_AEC_UPDATE",
+ "Y_GAMMA_CONFIG",
+ "SCALE_OUTPUT1_CONFIG",
+ "SCALE_OUTPUT2_CONFIG", /*135*/
+ "CAPTURE_RAW",
+ "STOP_LIVESHOT",
+ "RECONFIG_VFE",
+ "STATS_REQBUF_CFG",
+ "STATS_ENQUEUEBUF_CFG",/*140*/
+ "STATS_FLUSH_BUFQ_CFG",
+ "FOV_ENC_CFG",
+ "FOV_VIEW_CFG",
+ "FOV_ENC_UPDATE",
+ "FOV_VIEW_UPDATE",/*145*/
+ "SCALER_ENC_CFG",
+ "SCALER_VIEW_CFG",
+ "SCALER_ENC_UPDATE",
+ "SCALER_VIEW_UPDATE",
+ "COLORXFORM_ENC_CFG",/*150*/
+ "COLORXFORM_VIEW_CFG",
+ "COLORXFORM_ENC_UPDATE",
+ "COLORXFORM_VIEW_UPDATE",
+};
+
+static void vfe40_stop(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+
+ atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+	/* For resetting hw modules; a msg is sent when reset_irq comes. */
+ spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+ vfe40_ctrl->share_ctrl->stop_ack_pending = TRUE;
+ spin_unlock_irqrestore(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+
+ /* disable all interrupts. */
+ msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+ msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+ /* clear all pending interrupts*/
+ msm_camera_io_w(VFE_CLEAR_ALL_IRQ0,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+ msm_camera_io_w(VFE_CLEAR_ALL_IRQ1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+ /* in either continuous or snapshot mode, stop command can be issued
+ * at any time. stop camif immediately. */
+ msm_camera_io_w(CAMIF_COMMAND_STOP_IMMEDIATELY,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+}
+
+void vfe40_subdev_notify(int id, int path, int image_mode,
+ struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl)
+{
+ struct msm_vfe_resp rp;
+ struct msm_frame_info frame_info;
+ unsigned long flags = 0;
+ spin_lock_irqsave(&share_ctrl->sd_notify_lock, flags);
+ CDBG("%s: msgId = %d\n", __func__, id);
+ memset(&rp, 0, sizeof(struct msm_vfe_resp));
+ rp.evt_msg.type = MSM_CAMERA_MSG;
+ frame_info.image_mode = image_mode;
+ frame_info.path = path;
+ rp.evt_msg.data = &frame_info;
+ rp.type = id;
+ v4l2_subdev_notify(sd, NOTIFY_VFE_BUF_EVT, &rp);
+ spin_unlock_irqrestore(&share_ctrl->sd_notify_lock, flags);
+}
+
+static void vfe40_reset_internal_variables(
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+ vfe40_ctrl->vfeImaskCompositePacked = 0;
+ /* state control variables */
+ vfe40_ctrl->start_ack_pending = FALSE;
+ atomic_set(&vfe40_ctrl->share_ctrl->irq_cnt, 0);
+
+ spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+ vfe40_ctrl->share_ctrl->stop_ack_pending = FALSE;
+ spin_unlock_irqrestore(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+
+ vfe40_ctrl->reset_ack_pending = FALSE;
+
+ spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+ vfe40_ctrl->update_ack_pending = FALSE;
+ spin_unlock_irqrestore(&vfe40_ctrl->update_ack_lock, flags);
+
+ vfe40_ctrl->recording_state = VFE_STATE_IDLE;
+ vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+ atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+ /* 0 for continuous mode, 1 for snapshot mode */
+ vfe40_ctrl->share_ctrl->operation_mode = 0;
+ vfe40_ctrl->share_ctrl->outpath.output_mode = 0;
+ vfe40_ctrl->share_ctrl->vfe_capture_count = 0;
+
+ /* this is unsigned 32 bit integer. */
+ vfe40_ctrl->share_ctrl->vfeFrameId = 0;
+ /* Stats control variables. */
+ memset(&(vfe40_ctrl->afStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe40_ctrl->awbStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe40_ctrl->aecStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe40_ctrl->ihistStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe40_ctrl->rsStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ memset(&(vfe40_ctrl->csStatsControl), 0,
+ sizeof(struct vfe_stats_control));
+
+ vfe40_ctrl->frame_skip_cnt = 31;
+ vfe40_ctrl->frame_skip_pattern = 0xffffffff;
+ vfe40_ctrl->snapshot_frame_cnt = 0;
+}
+
+static void vfe40_reset(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ vfe40_reset_internal_variables(vfe40_ctrl);
+ /* disable all interrupts. vfeImaskLocal is also reset to 0
+ * to begin with. */
+ msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+
+ msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+ /* clear all pending interrupts*/
+ msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+ msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(1, vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+ /* enable reset_ack interrupt. */
+ msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+ /* Write to VFE_GLOBAL_RESET_CMD to reset the vfe hardware. Once reset
+	 * is done, a hardware interrupt will be generated. The VFE ISR processes
+ * the interrupt to complete the function call. Note that the reset
+ * function is synchronous. */
+
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_0);
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_1);
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_2);
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_3);
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_4);
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_5);
+ msm_camera_io_w(0xAAAAAAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_6);
+ msm_camera_io_w(0x0002AAAA,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_0_BUS_BDG_QOS_CFG_7);
+}
+
+static int vfe40_operation_config(uint32_t *cmd,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t *p = cmd;
+
+ vfe40_ctrl->share_ctrl->operation_mode = *p;
+ vfe40_ctrl->share_ctrl->stats_comp = *(++p);
+ vfe40_ctrl->hfr_mode = *(++p);
+
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CFG);
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_RDI0_CFG);
+ if (msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ V40_GET_HW_VERSION_OFF) ==
+ VFE40_HW_NUMBER) {
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_RDI1_CFG);
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_RDI2_CFG);
+ } else {
+ ++p;
+ ++p;
+ }
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REALIGN_BUF);
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CHROMA_UP);
+ msm_camera_io_w(*(++p),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+ return 0;
+}
+
+static unsigned long vfe40_stats_dqbuf(struct vfe40_ctrl_type *vfe40_ctrl,
+ enum msm_stats_enum_type stats_type)
+{
+ struct msm_stats_meta_buf *buf = NULL;
+ int rc = 0;
+ rc = vfe40_ctrl->stats_ops.dqbuf(
+ vfe40_ctrl->stats_ops.stats_ctrl, stats_type, &buf);
+ if (rc < 0) {
+ pr_err("%s: dq stats buf (type = %d) err = %d",
+ __func__, stats_type, rc);
+ return 0L;
+ }
+ return buf->paddr;
+}
+
+static unsigned long vfe40_stats_flush_enqueue(
+ struct vfe40_ctrl_type *vfe40_ctrl,
+ enum msm_stats_enum_type stats_type)
+{
+ struct msm_stats_bufq *bufq = NULL;
+ struct msm_stats_meta_buf *stats_buf = NULL;
+ int rc = 0;
+ int i;
+
+	/*
+	 * Pass NULL for the ion client: the buffers are already mapped at
+	 * this stage, so the client is not required.  Flush all the buffers;
+	 * they move to the PREPARE state.
+	 */
+
+ rc = vfe40_ctrl->stats_ops.bufq_flush(
+ vfe40_ctrl->stats_ops.stats_ctrl, stats_type, NULL);
+ if (rc < 0) {
+		pr_err("%s: flush stats bufq (type = %d) err = %d",
+			__func__, stats_type, rc);
+ return 0L;
+ }
+ /* Queue all the buffers back to QUEUED state */
+ bufq = vfe40_ctrl->stats_ctrl.bufq[stats_type];
+ for (i = 0; i < bufq->num_bufs; i++) {
+ stats_buf = &bufq->bufs[i];
+ rc = vfe40_ctrl->stats_ops.enqueue_buf(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ &(stats_buf->info), NULL);
+ if (rc < 0) {
+			pr_err("%s: enqueue stats buf (type = %d) err = %d",
+				__func__, stats_type, rc);
+ return rc;
+ }
+ }
+ return 0L;
+}
+
+static int vfe_stats_awb_buf_init(
+ struct vfe40_ctrl_type *vfe40_ctrl,
+ struct vfe_cmd_stats_buf *in)
+{
+ uint32_t addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq awb ping buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AWB_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+		pr_err("%s: dq awb pong buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AWB_WR_PONG_ADDR);
+ return 0;
+}
+
+static int vfe_stats_aec_buf_init(
+ struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+ uint32_t addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AEC);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq aec ping buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AEC_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AEC);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq aec pong buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+ return 0;
+}
+
+static int vfe_stats_af_buf_init(
+ struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+ uint32_t addr;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ rc = vfe40_stats_flush_enqueue(vfe40_ctrl, MSM_STATS_TYPE_AF);
+ if (rc < 0) {
+ pr_err("%s: dq stats buf err = %d",
+ __func__, rc);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ return -EINVAL;
+ }
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AF);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq af ping buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AF_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AF);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq af pong buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_AF_WR_PONG_ADDR);
+
+ return 0;
+}
+
+static int vfe_stats_ihist_buf_init(
+ struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+ uint32_t addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq ihist ping buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_HIST_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq ihist pong buf from free buf queue",
+ __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_HIST_WR_PONG_ADDR);
+
+ return 0;
+}
+
+static int vfe_stats_rs_buf_init(
+ struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+ uint32_t addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq rs ping buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_RS_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq rs pong buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_RS_WR_PONG_ADDR);
+ return 0;
+}
+
+static int vfe_stats_cs_buf_init(
+ struct vfe40_ctrl_type *vfe40_ctrl, struct vfe_cmd_stats_buf *in)
+{
+ uint32_t addr;
+ unsigned long flags;
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq cs ping buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_CS_WR_PING_ADDR);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (!addr) {
+ pr_err("%s: dq cs pong buf from free buf queue", __func__);
+ return -ENOMEM;
+ }
+ msm_camera_io_w(addr,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_CS_WR_PONG_ADDR);
+ return 0;
+}
+
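+/*
+ * Common start sequence: mark a START ack as pending, program the IRQ
+ * masks, kick a register update and the CAMIF command, dump the
+ * register space for debugging and mark the VFE state as active.
+ */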
+static void vfe40_start_common(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t irq_mask = 0x1E000011;
+ vfe40_ctrl->start_ack_pending = TRUE;
+	CDBG("VFE operation mode = 0x%x, output mode = 0x%x\n",
+ vfe40_ctrl->share_ctrl->operation_mode,
+ vfe40_ctrl->share_ctrl->outpath.output_mode);
+
+ msm_camera_io_w(irq_mask,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+ msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+
+ msm_camera_io_dump(vfe40_ctrl->share_ctrl->vfebase,
+ vfe40_ctrl->share_ctrl->register_total*4);
+
+ atomic_set(&vfe40_ctrl->share_ctrl->vstate, 1);
+}
+
+static int vfe40_start_recording(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_VIDEO);
+ vfe40_ctrl->recording_state = VFE_STATE_START_REQUESTED;
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ return 0;
+}
+
+static int vfe40_stop_recording(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ vfe40_ctrl->recording_state = VFE_STATE_STOP_REQUESTED;
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+ return 0;
+}
+
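+/*
+ * Arm a live snapshot: the capture count is currently hardcoded to one
+ * frame, then a register update is requested so the reg-update handler
+ * picks up the START_REQUESTED liveshot state.
+ */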
+static void vfe40_start_liveshot(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ /* Hardcode 1 live snapshot for now. */
+ vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt = 1;
+ vfe40_ctrl->share_ctrl->vfe_capture_count =
+ vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt;
+
+ vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_START_REQUESTED;
+ msm_camera_io_w_mb(1, vfe40_ctrl->
+ share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+}
+
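+/*
+ * ZSL start: extend the composite IRQ mask and enable the AXI write
+ * masters for whichever primary/secondary output modes are active, then
+ * run the common start sequence with a ZSL bus bandwidth vote.
+ */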
+static int vfe40_zsl(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t irq_comp_mask = 0;
+ /* capture command is valid for both idle and active state. */
+ irq_comp_mask =
+ msm_camera_io_r(vfe40_ctrl->
+ share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+ CDBG("%s:op mode %d O/P Mode %d\n", __func__,
+ vfe40_ctrl->share_ctrl->operation_mode,
+ vfe40_ctrl->share_ctrl->outpath.output_mode);
+
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ irq_comp_mask |= (
+ (0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0)) |
+ (0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch1)));
+ } else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ irq_comp_mask |= (
+ (0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0)) |
+ (0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch1)) |
+ (0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch2)));
+ }
+
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ irq_comp_mask |= ((0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0 + 8)) |
+ (0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1 + 8)));
+ } else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ irq_comp_mask |= (
+ (0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0 + 8)) |
+ (0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1 + 8)) |
+ (0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch2 + 8)));
+ }
+
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch2]);
+ }
+
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ } else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch2]);
+ }
+
+ msm_camera_io_w(irq_comp_mask,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+ vfe40_start_common(vfe40_ctrl);
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_ZSL);
+
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x18C);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x188);
+ return 0;
+}
+static int vfe40_capture_raw(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe40_ctrl_type *vfe40_ctrl,
+ uint32_t num_frames_capture)
+{
+ uint32_t irq_comp_mask = 0;
+
+ vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt = num_frames_capture;
+ vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+
+ irq_comp_mask =
+ msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ irq_comp_mask |=
+ (0x1 << (vfe40_ctrl->share_ctrl->outpath.out0.ch0));
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ }
+
+ msm_camera_io_w(irq_comp_mask,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+ vfe40_start_common(vfe40_ctrl);
+ return 0;
+}
+
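+/*
+ * Snapshot capture: program the capture counts for the main/thumbnail
+ * outputs, extend the composite IRQ mask and enable the relevant AXI
+ * write masters before starting the VFE with a capture bus vote.
+ */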
+static int vfe40_capture(
+ struct msm_cam_media_controller *pmctl,
+ uint32_t num_frames_capture,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t irq_comp_mask = 0;
+
+ /* capture command is valid for both idle and active state. */
+ vfe40_ctrl->share_ctrl->outpath.out1.capture_cnt = num_frames_capture;
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN ||
+ vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG) {
+ vfe40_ctrl->share_ctrl->outpath.out0.capture_cnt =
+ num_frames_capture;
+ }
+
+ vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+ irq_comp_mask = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN) {
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ irq_comp_mask |= (0x1 << vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0 |
+ 0x1 << vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1);
+ }
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ irq_comp_mask |=
+ (0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0 + 8) |
+ 0x1 << (vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1 + 8));
+ }
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ }
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ }
+ }
+
+ vfe40_ctrl->share_ctrl->vfe_capture_count = num_frames_capture;
+
+ msm_camera_io_w(irq_comp_mask,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+ msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_CAPTURE);
+
+ vfe40_start_common(vfe40_ctrl);
+ /* for debug */
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x18C);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase + 0x188);
+ return 0;
+}
+
+static int vfe40_start(
+ struct msm_cam_media_controller *pmctl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t irq_comp_mask = 0;
+ irq_comp_mask =
+ msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ VFE_IRQ_COMP_MASK);
+
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ irq_comp_mask |= (
+ 0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch0 |
+ 0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch1);
+ } else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ irq_comp_mask |= (
+ 0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch0 |
+ 0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch1 |
+ 0x1 << vfe40_ctrl->share_ctrl->outpath.out0.ch2);
+ }
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ irq_comp_mask |= (
+ 0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+ 0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch1 + 8));
+ } else if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ irq_comp_mask |= (
+ 0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch0 + 8) |
+ 0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
+ 0x1 << (vfe40_ctrl->share_ctrl->outpath.out1.ch2 + 8));
+ }
+ msm_camera_io_w(irq_comp_mask,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
+
+ /*
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);*/
+ vfe40_start_common(vfe40_ctrl);
+ return 0;
+}
+
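+/*
+ * Apply pending table updates (linearization, luma adaptation, gamma)
+ * by toggling the corresponding bank-select bits, then mark an UPDATE
+ * ack as pending and request a register update.
+ */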
+static void vfe40_update(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+ uint32_t value = 0;
+ if (vfe40_ctrl->update_linear) {
+ if (!msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1))
+ msm_camera_io_w(1,
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1);
+ else
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1);
+ vfe40_ctrl->update_linear = false;
+ }
+
+ if (vfe40_ctrl->update_la) {
+ if (!msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF))
+ msm_camera_io_w(1,
+ vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+ else
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+ vfe40_ctrl->update_la = false;
+ }
+
+ if (vfe40_ctrl->update_gamma) {
+ value = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+ value ^= V40_GAMMA_LUT_BANK_SEL_MASK;
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+ vfe40_ctrl->update_gamma = false;
+ }
+
+ spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+ vfe40_ctrl->update_ack_pending = TRUE;
+ spin_unlock_irqrestore(&vfe40_ctrl->update_ack_lock, flags);
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ return;
+}
+
+static void vfe40_sync_timer_stop(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t value = 0;
+ vfe40_ctrl->sync_timer_state = 0;
+ if (vfe40_ctrl->sync_timer_number == 0)
+ value = 0x10000;
+ else if (vfe40_ctrl->sync_timer_number == 1)
+ value = 0x20000;
+ else if (vfe40_ctrl->sync_timer_number == 2)
+ value = 0x40000;
+
+ /* Timer Stop */
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF);
+}
+
+static void vfe40_sync_timer_start(
+ const uint32_t *tbl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+	/* start bit; shifted below by the sync timer number. */
+	uint32_t value = 1;
+ uint32_t val;
+
+ vfe40_ctrl->sync_timer_state = *tbl++;
+ vfe40_ctrl->sync_timer_repeat_count = *tbl++;
+ vfe40_ctrl->sync_timer_number = *tbl++;
+ CDBG("%s timer_state %d, repeat_cnt %d timer number %d\n",
+ __func__, vfe40_ctrl->sync_timer_state,
+ vfe40_ctrl->sync_timer_repeat_count,
+ vfe40_ctrl->sync_timer_number);
+
+ if (vfe40_ctrl->sync_timer_state) { /* Start Timer */
+ value = value << vfe40_ctrl->sync_timer_number;
+ } else { /* Stop Timer */
+		CDBG("Sync timer state is stop; timer not started\n");
+ return;
+ }
+
+ /* Timer Start */
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF);
+ /* Sync Timer Line Start */
+ value = *tbl++;
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+ 4 + ((vfe40_ctrl->sync_timer_number) * 12));
+ /* Sync Timer Pixel Start */
+ value = *tbl++;
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+ 8 + ((vfe40_ctrl->sync_timer_number) * 12));
+ /* Sync Timer Pixel Duration */
+ value = *tbl++;
+ val = vfe40_ctrl->share_ctrl->vfe_clk_rate / 10000;
+ val = 10000000 / val;
+ val = value * 10000 / val;
+ CDBG("%s: Pixel Clk Cycles!!! %d\n", __func__, val);
+ msm_camera_io_w(val,
+ vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_OFF +
+ 12 + ((vfe40_ctrl->sync_timer_number) * 12));
+ /* Timer0 Active High/LOW */
+ value = *tbl++;
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_SYNC_TIMER_POLARITY_OFF);
+ /* Selects sync timer 0 output to drive onto timer1 port */
+ value = 0;
+ msm_camera_io_w(value,
+ vfe40_ctrl->share_ctrl->vfebase + V40_TIMER_SELECT_OFF);
+}
+
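+/*
+ * Select a DMI RAM bank (with auto increment enabled by the default
+ * config value) and reset the DMI address to offset 0 before table
+ * reads or writes.
+ */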
+static void vfe40_program_dmi_cfg(
+ enum VFE40_DMI_RAM_SEL bankSel,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ /* set bit 8 for auto increment. */
+ uint32_t value = VFE_DMI_CFG_DEFAULT;
+ value += (uint32_t)bankSel;
+ CDBG("%s: banksel = %d\n", __func__, bankSel);
+
+ msm_camera_io_w(value, vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_CFG);
+ /* by default, always starts with offset 0.*/
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_ADDR);
+}
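+
+/*
+ * Write one gamma LUT bank: each 32-bit table entry carries two packed
+ * 16-bit values, written to the DMI data register in sequence.
+ */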
+static void vfe40_write_gamma_cfg(
+ enum VFE40_DMI_RAM_SEL channel_sel,
+ const uint32_t *tbl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ int i;
+ uint32_t value, value1, value2;
+ vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+ for (i = 0 ; i < (VFE40_GAMMA_NUM_ENTRIES/2) ; i++) {
+ value = *tbl++;
+ value1 = value & 0x0000FFFF;
+ value2 = (value & 0xFFFF0000)>>16;
+ msm_camera_io_w((value1),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+ msm_camera_io_w((value2),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+ }
+ vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+static void vfe40_read_gamma_cfg(
+ enum VFE40_DMI_RAM_SEL channel_sel,
+ uint32_t *tbl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ int i;
+ vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+ CDBG("%s: Gamma table channel: %d\n", __func__, channel_sel);
+ for (i = 0 ; i < VFE40_GAMMA_NUM_ENTRIES ; i++) {
+ *tbl = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+ CDBG("%s: %08x\n", __func__, *tbl);
+ tbl++;
+ }
+ vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+static void vfe40_write_la_cfg(
+ enum VFE40_DMI_RAM_SEL channel_sel,
+ const uint32_t *tbl,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t i;
+ uint32_t value, value1, value2;
+
+ vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+ for (i = 0 ; i < (VFE40_LA_TABLE_LENGTH/2) ; i++) {
+ value = *tbl++;
+ value1 = value & 0x0000FFFF;
+ value2 = (value & 0xFFFF0000)>>16;
+ msm_camera_io_w((value1),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+ msm_camera_io_w((value2),
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+ }
+ vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+struct vfe40_output_ch *vfe40_get_ch(
+ int path, struct vfe_share_ctrl_t *share_ctrl)
+{
+ struct vfe40_output_ch *ch = NULL;
+
+ if (path == VFE_MSG_OUTPUT_PRIMARY)
+ ch = &share_ctrl->outpath.out0;
+ else if (path == VFE_MSG_OUTPUT_SECONDARY)
+ ch = &share_ctrl->outpath.out1;
+ else
+ pr_err("%s: Invalid path %d\n", __func__,
+ path);
+
+ BUG_ON(ch == NULL);
+ return ch;
+}
+
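+/*
+ * Ask the subdev for fresh ping/pong buffers for the given path and
+ * program their plane addresses into the channel ping/pong registers;
+ * fails if either address comes back NULL.
+ */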
+static int vfe40_configure_pingpong_buffers(
+ int id, int path, struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ struct vfe40_output_ch *outch = NULL;
+ int rc = 0;
+ uint32_t image_mode = 0;
+ if (path == VFE_MSG_OUTPUT_PRIMARY)
+ image_mode = vfe40_ctrl->share_ctrl->outpath.out0.image_mode;
+ else
+ image_mode = vfe40_ctrl->share_ctrl->outpath.out1.image_mode;
+
+ vfe40_subdev_notify(id, path, image_mode,
+ &vfe40_ctrl->subdev, vfe40_ctrl->share_ctrl);
+ outch = vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+ if (outch->ping.ch_paddr[0] && outch->pong.ch_paddr[0]) {
+ /* Configure Preview Ping Pong */
+ CDBG("%s Configure ping/pong address for %d",
+ __func__, path);
+ vfe40_put_ch_ping_addr(
+ vfe40_ctrl->share_ctrl->vfebase, outch->ch0,
+ outch->ping.ch_paddr[0]);
+ vfe40_put_ch_pong_addr(
+ vfe40_ctrl->share_ctrl->vfebase, outch->ch0,
+ outch->pong.ch_paddr[0]);
+
+ if (vfe40_ctrl->share_ctrl->operation_mode !=
+ VFE_OUTPUTS_RAW) {
+ vfe40_put_ch_ping_addr(
+ vfe40_ctrl->share_ctrl->vfebase, outch->ch1,
+ outch->ping.ch_paddr[1]);
+ vfe40_put_ch_pong_addr(
+ vfe40_ctrl->share_ctrl->vfebase, outch->ch1,
+ outch->pong.ch_paddr[1]);
+ }
+
+ if (outch->ping.num_planes > 2)
+ vfe40_put_ch_ping_addr(
+ vfe40_ctrl->share_ctrl->vfebase, outch->ch2,
+ outch->ping.ch_paddr[2]);
+ if (outch->pong.num_planes > 2)
+ vfe40_put_ch_pong_addr(
+ vfe40_ctrl->share_ctrl->vfebase, outch->ch2,
+ outch->pong.ch_paddr[2]);
+
+ /* avoid stale info */
+ memset(&outch->ping, 0, sizeof(struct msm_free_buf));
+ memset(&outch->pong, 0, sizeof(struct msm_free_buf));
+ } else {
+ pr_err("%s ping/pong addr is null!!", __func__);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static void vfe40_write_linear_cfg(
+ enum VFE40_DMI_RAM_SEL channel_sel,
+ const uint32_t *tbl, struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ uint32_t i;
+
+ vfe40_program_dmi_cfg(channel_sel, vfe40_ctrl);
+ /* for loop for configuring LUT. */
+ for (i = 0 ; i < VFE40_LINEARIZATON_TABLE_LENGTH ; i++) {
+ msm_camera_io_w(*tbl,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DMI_DATA_LO);
+ tbl++;
+ }
+ CDBG("done writing to linearization table\n");
+ vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+}
+
+void vfe40_send_isp_msg(
+ struct v4l2_subdev *sd,
+ uint32_t vfeFrameId,
+ uint32_t isp_msg_id)
+{
+ struct isp_msg_event isp_msg_evt;
+
+ isp_msg_evt.msg_id = isp_msg_id;
+ isp_msg_evt.sof_count = vfeFrameId;
+ v4l2_subdev_notify(sd,
+ NOTIFY_ISP_MSG_EVT,
+ (void *)&isp_msg_evt);
+}
+
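+/*
+ * Dispatch a general ISP command: most cases copy the user payload and
+ * write it to the register block described by vfe40_cmd[], while the
+ * start/stop/capture commands also (re)configure ping/pong buffers and
+ * the stats bus write masters.
+ */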
+static int vfe40_proc_general(
+ struct msm_cam_media_controller *pmctl,
+ struct msm_isp_cmd *cmd,
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ int i , rc = 0;
+ uint32_t old_val = 0 , new_val = 0;
+ uint32_t *cmdp = NULL;
+ uint32_t *cmdp_local = NULL;
+ uint32_t snapshot_cnt = 0;
+ uint32_t temp1 = 0, temp2 = 0;
+
+ CDBG("vfe40_proc_general: cmdID = %s, length = %d\n",
+ vfe40_general_cmd[cmd->id], cmd->length);
+ switch (cmd->id) {
+ case VFE_CMD_RESET:
+ CDBG("vfe40_proc_general: cmdID = %s\n",
+ vfe40_general_cmd[cmd->id]);
+ vfe40_reset(vfe40_ctrl);
+ break;
+ case VFE_CMD_START:
+ CDBG("vfe40_proc_general: cmdID = %s\n",
+ vfe40_general_cmd[cmd->id]);
+ if ((vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) ||
+ (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_PREVIEW))
+ /* Configure primary channel */
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_START, VFE_MSG_OUTPUT_PRIMARY,
+ vfe40_ctrl);
+ else
+ /* Configure secondary channel */
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_START, VFE_MSG_OUTPUT_SECONDARY,
+ vfe40_ctrl);
+ if (rc < 0) {
+ pr_err(
+ "%s error configuring pingpong buffers for preview",
+ __func__);
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+
+ rc = vfe40_start(pmctl, vfe40_ctrl);
+ break;
+ case VFE_CMD_UPDATE:
+ vfe40_update(vfe40_ctrl);
+ break;
+ case VFE_CMD_CAPTURE_RAW:
+ CDBG("%s: cmdID = VFE_CMD_CAPTURE_RAW\n", __func__);
+ if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+ sizeof(uint32_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+ vfe40_ctrl);
+ if (rc < 0) {
+ pr_err(
+ "%s error configuring pingpong buffers for snapshot",
+ __func__);
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ rc = vfe40_capture_raw(pmctl, vfe40_ctrl, snapshot_cnt);
+ break;
+ case VFE_CMD_CAPTURE:
+ if (copy_from_user(&snapshot_cnt, (void __user *)(cmd->value),
+ sizeof(uint32_t))) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG) {
+ if (snapshot_cnt != 1) {
+ pr_err("only support 1 inline snapshot\n");
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ /* Configure primary channel for JPEG */
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_JPEG_CAPTURE,
+ VFE_MSG_OUTPUT_PRIMARY,
+ vfe40_ctrl);
+ } else {
+ /* Configure primary channel */
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_CAPTURE,
+ VFE_MSG_OUTPUT_PRIMARY,
+ vfe40_ctrl);
+ }
+ if (rc < 0) {
+ pr_err(
+ "%s error configuring pingpong buffers for primary output",
+ __func__);
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ /* Configure secondary channel */
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+ vfe40_ctrl);
+ if (rc < 0) {
+ pr_err(
+ "%s error configuring pingpong buffers for secondary output",
+ __func__);
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ rc = vfe40_capture(pmctl, snapshot_cnt, vfe40_ctrl);
+ break;
+ case VFE_CMD_START_RECORDING:
+ CDBG("vfe40_proc_general: cmdID = %s\n",
+ vfe40_general_cmd[cmd->id]);
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO)
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_START_RECORDING,
+ VFE_MSG_OUTPUT_SECONDARY,
+ vfe40_ctrl);
+ else if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_VIDEO_AND_PREVIEW)
+ rc = vfe40_configure_pingpong_buffers(
+ VFE_MSG_START_RECORDING,
+ VFE_MSG_OUTPUT_PRIMARY,
+ vfe40_ctrl);
+ if (rc < 0) {
+ pr_err(
+ "%s error configuring pingpong buffers for video\n",
+ __func__);
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ rc = vfe40_start_recording(pmctl, vfe40_ctrl);
+ break;
+ case VFE_CMD_STOP_RECORDING:
+ CDBG("vfe40_proc_general: cmdID = %s\n",
+ vfe40_general_cmd[cmd->id]);
+ rc = vfe40_stop_recording(pmctl, vfe40_ctrl);
+ break;
+ case VFE_CMD_OPERATION_CFG: {
+ if (cmd->length != V40_OPERATION_CFG_LEN) {
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+		cmdp = kmalloc(V40_OPERATION_CFG_LEN, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+			(void __user *)(cmd->value),
+			V40_OPERATION_CFG_LEN)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+ rc = vfe40_operation_config(cmdp, vfe40_ctrl);
+ }
+ break;
+
+ case VFE_CMD_STATS_AE_START: {
+ rc = vfe_stats_aec_buf_init(vfe40_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of AEC",
+ __func__);
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val |= BG_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+ case VFE_CMD_STATS_AF_START: {
+ rc = vfe_stats_af_buf_init(vfe40_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of AF",
+ __func__);
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ VFE_MODULE_CFG);
+ old_val |= BF_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+ case VFE_CMD_STATS_AWB_START: {
+ rc = vfe_stats_awb_buf_init(vfe40_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of AWB",
+ __func__);
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val |= AWB_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+ case VFE_CMD_STATS_IHIST_START: {
+ rc = vfe_stats_ihist_buf_init(vfe40_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of IHIST",
+ __func__);
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val |= IHIST_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+
+ case VFE_CMD_STATS_RS_START: {
+ rc = vfe_stats_rs_buf_init(vfe40_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of RS",
+ __func__);
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+ case VFE_CMD_STATS_CS_START: {
+ rc = vfe_stats_cs_buf_init(vfe40_ctrl, NULL);
+ if (rc < 0) {
+ pr_err("%s: cannot config ping/pong address of CS",
+ __func__);
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+ case VFE_CMD_MCE_UPDATE:
+ case VFE_CMD_MCE_CFG:{
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ /* Incrementing with 4 so as to point to the 2nd Register as
+ the 2nd register has the mce_enable bit */
+ old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 4);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ new_val = *cmdp_local;
+ old_val &= MCE_EN_MASK;
+ new_val = new_val | old_val;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 4, &new_val, 4);
+ cmdp_local += 1;
+
+ old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 8);
+ new_val = *cmdp_local;
+ old_val &= MCE_Q_K_MASK;
+ new_val = new_val | old_val;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 8, &new_val, 4);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp_local, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+ case VFE_CMD_CHROMA_SUP_UPDATE:
+ case VFE_CMD_CHROMA_SUP_CFG:{
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF, cmdp_local, 4);
+
+ cmdp_local += 1;
+ new_val = *cmdp_local;
+ /* Incrementing with 4 so as to point to the 2nd Register as
+ * the 2nd register has the mce_enable bit
+ */
+ old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 4);
+ old_val &= ~MCE_EN_MASK;
+ new_val = new_val | old_val;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 4, &new_val, 4);
+ cmdp_local += 1;
+
+ old_val = msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 8);
+ new_val = *cmdp_local;
+ old_val &= ~MCE_Q_K_MASK;
+ new_val = new_val | old_val;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_CHROMA_SUP_OFF + 8, &new_val, 4);
+ }
+ break;
+ case VFE_CMD_BLACK_LEVEL_CFG:
+ rc = -EFAULT;
+ goto proc_general_done;
+
+ case VFE_CMD_LA_CFG:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp_local, (vfe40_cmd[cmd->id].length));
+
+ cmdp_local += 1;
+ vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+ cmdp_local, vfe40_ctrl);
+ break;
+
+ case VFE_CMD_LA_UPDATE: {
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+
+ cmdp_local = cmdp + 1;
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_LA_OFF);
+ if (old_val != 0x0)
+ vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+ cmdp_local, vfe40_ctrl);
+ else
+ vfe40_write_la_cfg(LUMA_ADAPT_LUT_RAM_BANK1,
+ cmdp_local, vfe40_ctrl);
+ }
+ vfe40_ctrl->update_la = true;
+ break;
+
+ case VFE_CMD_GET_LA_TABLE:
+ temp1 = sizeof(uint32_t) * VFE40_LA_TABLE_LENGTH / 2;
+ if (cmd->length != temp1) {
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ cmdp = kzalloc(temp1, GFP_KERNEL);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ if (msm_camera_io_r(vfe40_ctrl->
+ share_ctrl->vfebase + V40_LA_OFF))
+ vfe40_program_dmi_cfg(LUMA_ADAPT_LUT_RAM_BANK1,
+ vfe40_ctrl);
+ else
+ vfe40_program_dmi_cfg(LUMA_ADAPT_LUT_RAM_BANK0,
+ vfe40_ctrl);
+ for (i = 0 ; i < (VFE40_LA_TABLE_LENGTH / 2) ; i++) {
+ *cmdp_local =
+ msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ *cmdp_local |= (msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO)) << 16;
+ cmdp_local++;
+ }
+ vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+ if (copy_to_user((void __user *)(cmd->value), cmdp,
+ temp1)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ break;
+ case VFE_CMD_SK_ENHAN_CFG:
+ case VFE_CMD_SK_ENHAN_UPDATE:{
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_SCE_OFF,
+ cmdp, V40_SCE_LEN);
+ }
+ break;
+
+ case VFE_CMD_LIVESHOT:
+ /* Configure primary channel */
+ rc = vfe40_configure_pingpong_buffers(VFE_MSG_CAPTURE,
+ VFE_MSG_OUTPUT_PRIMARY, vfe40_ctrl);
+ if (rc < 0) {
+ pr_err(
+ "%s error configuring pingpong buffers for primary output\n",
+ __func__);
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ vfe40_start_liveshot(pmctl, vfe40_ctrl);
+ break;
+
+ case VFE_CMD_LINEARIZATION_CFG:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp, (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1,
+ cmdp_local, V40_LINEARIZATION_LEN1);
+
+ cmdp_local = cmdp + 17;
+ vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK0,
+ cmdp_local, vfe40_ctrl);
+ break;
+
+ case VFE_CMD_LINEARIZATION_UPDATE:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp, (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ cmdp_local++;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1 + 4,
+ cmdp_local, (V40_LINEARIZATION_LEN1 - 4));
+ cmdp_local = cmdp + 17;
+ /*extracting the bank select*/
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1);
+
+ if (old_val != 0x0)
+ vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK0,
+ cmdp_local, vfe40_ctrl);
+ else
+ vfe40_write_linear_cfg(BLACK_LUT_RAM_BANK1,
+ cmdp_local, vfe40_ctrl);
+ vfe40_ctrl->update_linear = true;
+ break;
+
+ case VFE_CMD_GET_LINEARIZATON_TABLE:
+ temp1 = sizeof(uint32_t) * VFE40_LINEARIZATON_TABLE_LENGTH;
+ if (cmd->length != temp1) {
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ cmdp = kzalloc(temp1, GFP_KERNEL);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ if (msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_LINEARIZATION_OFF1))
+ vfe40_program_dmi_cfg(BLACK_LUT_RAM_BANK1, vfe40_ctrl);
+ else
+ vfe40_program_dmi_cfg(BLACK_LUT_RAM_BANK0, vfe40_ctrl);
+ CDBG("%s: Linearization Table\n", __func__);
+ for (i = 0 ; i < VFE40_LINEARIZATON_TABLE_LENGTH ; i++) {
+ *cmdp_local = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_DMI_DATA_LO);
+ CDBG("%s: %08x\n", __func__, *cmdp_local);
+ cmdp_local++;
+ }
+ vfe40_program_dmi_cfg(NO_MEM_SELECTED, vfe40_ctrl);
+ if (copy_to_user((void __user *)(cmd->value), cmdp,
+ temp1)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ break;
+ case VFE_CMD_DEMOSAICV3:
+ if (cmd->length !=
+ V40_DEMOSAICV3_0_LEN+V40_DEMOSAICV3_1_LEN) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ new_val = *cmdp_local;
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+ old_val &= DEMOSAIC_MASK;
+ new_val = new_val | old_val;
+ *cmdp_local = new_val;
+
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+ cmdp_local, V40_DEMOSAICV3_0_LEN);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_1_OFF,
+ cmdp_local, V40_DEMOSAICV3_1_LEN);
+ break;
+
+ case VFE_CMD_DEMOSAICV3_UPDATE:
+ if (cmd->length !=
+ V40_DEMOSAICV3_0_LEN * V40_DEMOSAICV3_UP_REG_CNT) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ new_val = *cmdp_local;
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+ old_val &= DEMOSAIC_MASK;
+ new_val = new_val | old_val;
+ *cmdp_local = new_val;
+
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+ cmdp_local, V40_DEMOSAICV3_0_LEN);
+	/* The register space is not contiguous; step the source pointer
+	 * past the word just written before copying the next block */
+ cmdp_local += 1;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_1_OFF,
+ cmdp_local, 2 * V40_DEMOSAICV3_0_LEN);
+ /* As the address space is not contiguous increment by 2
+ * before copying to next address space */
+ cmdp_local += 2;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_2_OFF,
+ cmdp_local, 2 * V40_DEMOSAICV3_0_LEN);
+ break;
+
+ case VFE_CMD_DEMOSAICV3_ABCC_CFG:
+ rc = -EFAULT;
+ break;
+
+ case VFE_CMD_DEMOSAICV3_ABF_UPDATE:/* 116 ABF update */
+ case VFE_CMD_DEMOSAICV3_ABF_CFG: { /* 108 ABF config */
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ new_val = *cmdp_local;
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+ old_val &= ABF_MASK;
+ new_val = new_val | old_val;
+ *cmdp_local = new_val;
+
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+ cmdp_local, 4);
+
+ cmdp_local += 1;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp_local, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+ case VFE_CMD_DEMOSAICV3_DBCC_CFG:
+ case VFE_CMD_DEMOSAICV3_DBCC_UPDATE:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ new_val = *cmdp_local;
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+ old_val &= DBCC_MASK;
+
+ new_val = new_val | old_val;
+ *cmdp_local = new_val;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF,
+ cmdp_local, 4);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp_local, (vfe40_cmd[cmd->id].length));
+ break;
+
+ case VFE_CMD_DEMOSAICV3_DBPC_CFG:
+ case VFE_CMD_DEMOSAICV3_DBPC_UPDATE:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+ new_val = *cmdp_local;
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_DEMOSAICV3_0_OFF);
+ old_val &= DBPC_MASK;
+
+ new_val = new_val | old_val;
+ *cmdp_local = new_val;
+ msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+ V40_DEMOSAICV3_0_OFF,
+ cmdp_local, V40_DEMOSAICV3_0_LEN);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+ V40_DEMOSAICV3_DBPC_CFG_OFF,
+ cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+ V40_DEMOSAICV3_DBPC_CFG_OFF0,
+ cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+ V40_DEMOSAICV3_DBPC_CFG_OFF1,
+ cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+ cmdp_local += 1;
+ msm_camera_io_memcpy(vfe40_ctrl->share_ctrl->vfebase +
+ V40_DEMOSAICV3_DBPC_CFG_OFF2,
+ cmdp_local, V40_DEMOSAICV3_DBPC_LEN);
+ break;
+
+ case VFE_CMD_RGB_G_CFG: {
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF,
+ cmdp, 4);
+ cmdp += 1;
+
+ vfe40_write_gamma_cfg(RGBLUT_RAM_CH0_BANK0, cmdp, vfe40_ctrl);
+ vfe40_write_gamma_cfg(RGBLUT_RAM_CH1_BANK0, cmdp, vfe40_ctrl);
+ vfe40_write_gamma_cfg(RGBLUT_RAM_CH2_BANK0, cmdp, vfe40_ctrl);
+ }
+ cmdp -= 1;
+ break;
+
+ case VFE_CMD_RGB_G_UPDATE: {
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp, (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+ cmdp += 1;
+ if (old_val != 0x0) {
+ vfe40_write_gamma_cfg(
+ RGBLUT_RAM_CH0_BANK0, cmdp, vfe40_ctrl);
+ vfe40_write_gamma_cfg(
+ RGBLUT_RAM_CH1_BANK0, cmdp, vfe40_ctrl);
+ vfe40_write_gamma_cfg(
+ RGBLUT_RAM_CH2_BANK0, cmdp, vfe40_ctrl);
+ } else {
+ vfe40_write_gamma_cfg(
+ RGBLUT_RAM_CH0_BANK1, cmdp, vfe40_ctrl);
+ vfe40_write_gamma_cfg(
+ RGBLUT_RAM_CH1_BANK1, cmdp, vfe40_ctrl);
+ vfe40_write_gamma_cfg(
+ RGBLUT_RAM_CH2_BANK1, cmdp, vfe40_ctrl);
+ }
+ }
+ vfe40_ctrl->update_gamma = TRUE;
+ cmdp -= 1;
+ break;
+
+ case VFE_CMD_GET_RGB_G_TABLE:
+ temp1 = sizeof(uint32_t) * VFE40_GAMMA_NUM_ENTRIES * 3;
+ if (cmd->length != temp1) {
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ cmdp = kzalloc(temp1, GFP_KERNEL);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ cmdp_local = cmdp;
+
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + V40_RGB_G_OFF);
+ temp2 = old_val ? RGBLUT_RAM_CH0_BANK1 :
+ RGBLUT_RAM_CH0_BANK0;
+ for (i = 0; i < 3; i++) {
+ vfe40_read_gamma_cfg(temp2,
+ cmdp_local + (VFE40_GAMMA_NUM_ENTRIES * i),
+ vfe40_ctrl);
+ temp2 += 2;
+ }
+ if (copy_to_user((void __user *)(cmd->value), cmdp,
+ temp1)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ break;
+
+ case VFE_CMD_STATS_AWB_STOP: {
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= ~AWB_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ }
+ break;
+
+ case VFE_CMD_STATS_AE_STOP: {
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= ~BG_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ }
+ break;
+ case VFE_CMD_STATS_AF_STOP: {
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= ~BF_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ rc = vfe40_stats_flush_enqueue(vfe40_ctrl, MSM_STATS_TYPE_AF);
+ if (rc < 0) {
+ pr_err("%s: dq stats buf err = %d",
+ __func__, rc);
+ return -EINVAL;
+ }
+ }
+ break;
+
+ case VFE_CMD_STATS_IHIST_STOP: {
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= ~IHIST_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ }
+ break;
+
+ case VFE_CMD_STATS_RS_STOP: {
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= ~RS_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ }
+ break;
+
+ case VFE_CMD_STATS_CS_STOP: {
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= ~CS_ENABLE_MASK;
+ msm_camera_io_w(old_val,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ }
+ break;
+ case VFE_CMD_STOP:
+ CDBG("vfe40_proc_general: cmdID = %s\n",
+ vfe40_general_cmd[cmd->id]);
+ vfe40_stop(vfe40_ctrl);
+ break;
+
+ case VFE_CMD_SYNC_TIMER_SETTING:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp, (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ vfe40_sync_timer_start(cmdp, vfe40_ctrl);
+ break;
+
+ case VFE_CMD_MODULE_CFG: {
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp,
+ (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ *cmdp &= ~STATS_ENABLE_MASK;
+ old_val = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+ old_val &= STATS_ENABLE_MASK;
+ *cmdp |= old_val;
+
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ }
+ break;
+
+ case VFE_CMD_ZSL:
+ rc = vfe40_configure_pingpong_buffers(VFE_MSG_START,
+ VFE_MSG_OUTPUT_PRIMARY, vfe40_ctrl);
+ if (rc < 0)
+ goto proc_general_done;
+ rc = vfe40_configure_pingpong_buffers(VFE_MSG_START,
+ VFE_MSG_OUTPUT_SECONDARY, vfe40_ctrl);
+ if (rc < 0)
+ goto proc_general_done;
+
+ rc = vfe40_zsl(pmctl, vfe40_ctrl);
+ break;
+
+ case VFE_CMD_ASF_CFG:
+ case VFE_CMD_ASF_UPDATE:
+ cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ if (copy_from_user(cmdp, (void __user *)(cmd->value),
+ cmd->length)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ cmdp_local = cmdp + V40_ASF_LEN/4;
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ V40_ASF_SPECIAL_EFX_CFG_OFF,
+ cmdp_local, V40_ASF_SPECIAL_EFX_CFG_LEN);
+ break;
+
+ case VFE_CMD_GET_HW_VERSION:
+ if (cmd->length != V40_GET_HW_VERSION_LEN) {
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(V40_GET_HW_VERSION_LEN, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ *cmdp = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase+V40_GET_HW_VERSION_OFF);
+ if (copy_to_user((void __user *)(cmd->value), cmdp,
+ V40_GET_HW_VERSION_LEN)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ break;
+ case VFE_CMD_GET_REG_DUMP:
+ temp1 = sizeof(uint32_t) *
+ vfe40_ctrl->share_ctrl->register_total;
+ if (cmd->length != temp1) {
+ rc = -EINVAL;
+ goto proc_general_done;
+ }
+ cmdp = kmalloc(temp1, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+ msm_camera_io_dump(vfe40_ctrl->share_ctrl->vfebase,
+ vfe40_ctrl->share_ctrl->register_total*4);
+ CDBG("%s: %p %p %d\n", __func__, (void *)cmdp,
+ vfe40_ctrl->share_ctrl->vfebase, temp1);
+ memcpy_fromio((void *)cmdp,
+ vfe40_ctrl->share_ctrl->vfebase, temp1);
+ if (copy_to_user((void __user *)(cmd->value), cmdp, temp1)) {
+ rc = -EFAULT;
+ goto proc_general_done;
+ }
+ break;
+ case VFE_CMD_FRAME_SKIP_CFG:
+ if (cmd->length != vfe40_cmd[cmd->id].length)
+ return -EINVAL;
+
+ cmdp = kmalloc(vfe40_cmd[cmd->id].length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+
+ if (copy_from_user((cmdp), (void __user *)cmd->value,
+ cmd->length)) {
+ rc = -EFAULT;
+ pr_err("%s copy from user failed for cmd %d",
+ __func__, cmd->id);
+ break;
+ }
+
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ vfe40_ctrl->frame_skip_cnt = ((uint32_t)
+ *cmdp & VFE_FRAME_SKIP_PERIOD_MASK) + 1;
+ vfe40_ctrl->frame_skip_pattern = (uint32_t)(*(cmdp + 2));
+ break;
+ default:
+ if (cmd->length != vfe40_cmd[cmd->id].length)
+ return -EINVAL;
+
+ cmdp = kmalloc(vfe40_cmd[cmd->id].length, GFP_ATOMIC);
+ if (!cmdp) {
+ rc = -ENOMEM;
+ goto proc_general_done;
+ }
+
+ if (copy_from_user((cmdp), (void __user *)cmd->value,
+ cmd->length)) {
+ rc = -EFAULT;
+ pr_err("%s copy from user failed for cmd %d",
+ __func__, cmd->id);
+ goto proc_general_done;
+ }
+ msm_camera_io_memcpy(
+ vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[cmd->id].offset,
+ cmdp, (vfe40_cmd[cmd->id].length));
+ break;
+
+ }
+
+proc_general_done:
+ kfree(cmdp);
+
+ return rc;
+}
+
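+/*
+ * Snapshot the pending IRQ status registers and the CAMIF status, then
+ * clear the same pending interrupts so they can be processed later.
+ */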
+static inline void vfe40_read_irq_status(
+ struct axi_ctrl_t *axi_ctrl, struct vfe40_irq_status *out)
+{
+ uint32_t *temp;
+ memset(out, 0, sizeof(struct vfe40_irq_status));
+ temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_IRQ_STATUS_0);
+ out->vfeIrqStatus0 = msm_camera_io_r(temp);
+
+ temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_IRQ_STATUS_1);
+ out->vfeIrqStatus1 = msm_camera_io_r(temp);
+
+ temp = (uint32_t *)(axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+ out->camifStatus = msm_camera_io_r(temp);
+ CDBG("camifStatus = 0x%x\n", out->camifStatus);
+
+ /* clear the pending interrupt of the same kind.*/
+ msm_camera_io_w(out->vfeIrqStatus0,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_0);
+ msm_camera_io_w(out->vfeIrqStatus1,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CLEAR_1);
+
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(1, axi_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+
+}
+
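+/*
+ * Register-update IRQ handler: advances the recording and liveshot
+ * state machines, acknowledges pending START/UPDATE/STOP_REC requests
+ * and, in snapshot modes, counts down frames while honouring the
+ * frame-skip pattern before stopping CAMIF at a frame boundary.
+ */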
+static void vfe40_process_reg_update_irq(
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+
+ if (vfe40_ctrl->recording_state == VFE_STATE_START_REQUESTED) {
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ }
+ vfe40_ctrl->recording_state = VFE_STATE_STARTED;
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+		CDBG("start video triggered.\n");
+ } else if (vfe40_ctrl->recording_state ==
+ VFE_STATE_STOP_REQUESTED) {
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ }
+		CDBG("stop video triggered.\n");
+ }
+
+ if (vfe40_ctrl->start_ack_pending == TRUE) {
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
+ vfe40_ctrl->start_ack_pending = FALSE;
+ } else {
+ if (vfe40_ctrl->recording_state ==
+ VFE_STATE_STOP_REQUESTED) {
+ vfe40_ctrl->recording_state = VFE_STATE_STOPPED;
+ /* request a reg update and send STOP_REC_ACK
+ * when we process the next reg update irq.
+ */
+ msm_camera_io_w_mb(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ } else if (vfe40_ctrl->recording_state ==
+ VFE_STATE_STOPPED) {
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_STOP_REC_ACK);
+ vfe40_ctrl->recording_state = VFE_STATE_IDLE;
+ }
+ spin_lock_irqsave(&vfe40_ctrl->update_ack_lock, flags);
+ if (vfe40_ctrl->update_ack_pending == TRUE) {
+ vfe40_ctrl->update_ack_pending = FALSE;
+ spin_unlock_irqrestore(
+ &vfe40_ctrl->update_ack_lock, flags);
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_UPDATE_ACK);
+ } else {
+ spin_unlock_irqrestore(
+ &vfe40_ctrl->update_ack_lock, flags);
+ }
+ }
+
+ if (vfe40_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_START_REQUESTED) {
+ CDBG("%s enabling liveshot output\n", __func__);
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ vfe40_ctrl->share_ctrl->liveshot_state =
+ VFE_STATE_STARTED;
+ }
+ }
+
+ if (vfe40_ctrl->share_ctrl->liveshot_state == VFE_STATE_STARTED) {
+ vfe40_ctrl->share_ctrl->vfe_capture_count--;
+ if (!vfe40_ctrl->share_ctrl->vfe_capture_count)
+ vfe40_ctrl->share_ctrl->liveshot_state =
+ VFE_STATE_STOP_REQUESTED;
+ msm_camera_io_w_mb(1, vfe40_ctrl->
+ share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ } else if (vfe40_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_STOP_REQUESTED) {
+ CDBG("%s: disabling liveshot output\n", __func__);
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ vfe40_ctrl->share_ctrl->liveshot_state =
+ VFE_STATE_STOPPED;
+ msm_camera_io_w_mb(1, vfe40_ctrl->share_ctrl->vfebase +
+ VFE_REG_UPDATE_CMD);
+ }
+ } else if (vfe40_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_STOPPED) {
+ vfe40_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+ }
+
+ if ((vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN) ||
+ (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB) ||
+ (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG) ||
+ (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB)) {
+ /* in snapshot mode */
+ /* later we need to add check for live snapshot mode. */
+ if (vfe40_ctrl->frame_skip_pattern & (0x1 <<
+ (vfe40_ctrl->snapshot_frame_cnt %
+ vfe40_ctrl->frame_skip_cnt))) {
+ vfe40_ctrl->share_ctrl->vfe_capture_count--;
+ /* if last frame to be captured: */
+ if (vfe40_ctrl->share_ctrl->vfe_capture_count == 0) {
+ /* stop the bus output:write master enable = 0*/
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode
+ & VFE40_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase+
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase+
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ }
+ if (vfe40_ctrl->share_ctrl->outpath.output_mode&
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase+
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase+
+ vfe40_AXI_WM_CFG[vfe40_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ }
+ msm_camera_io_w_mb
+ (CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+ vfe40_ctrl->share_ctrl->vfebase +
+ VFE_CAMIF_COMMAND);
+ vfe40_ctrl->snapshot_frame_cnt = -1;
+ vfe40_ctrl->frame_skip_cnt = 31;
+ vfe40_ctrl->frame_skip_pattern = 0xffffffff;
+ } /*if snapshot count is 0*/
+ } /*if frame is not being dropped*/
+ vfe40_ctrl->snapshot_frame_cnt++;
+ /* then do reg_update. */
+ msm_camera_io_w(1,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
+ } /* if snapshot mode. */
+}
+
+static void vfe40_set_default_reg_values(
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ msm_camera_io_w(0x800080,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_0);
+ msm_camera_io_w(0x800080,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_DEMUX_GAIN_1);
+ /* What value should we program CGC_OVERRIDE to? */
+ msm_camera_io_w(0xFFFFF,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CGC_OVERRIDE);
+
+ /* default output clamp values */
+ msm_camera_io_w(0, vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_ENC_MIN);
+ msm_camera_io_w(0xFFFFFF,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_ENC_MAX);
+ msm_camera_io_w(0,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_VIEW_MIN);
+ msm_camera_io_w(0xFFFFFF,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CLAMP_VIEW_MAX);
+
+ /* stats UB config */
+ msm_camera_io_w(0x3980007,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
+ msm_camera_io_w(0x3A00007,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
+ msm_camera_io_w(0x3A8000F,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
+ msm_camera_io_w(0x3B80007,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
+ msm_camera_io_w(0x3C0001F,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
+ msm_camera_io_w(0x3E0001F,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+}
+
+static void vfe40_process_reset_irq(
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+
+ atomic_set(&vfe40_ctrl->share_ctrl->vstate, 0);
+
+ spin_lock_irqsave(&vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+ if (vfe40_ctrl->share_ctrl->stop_ack_pending) {
+ vfe40_ctrl->share_ctrl->stop_ack_pending = FALSE;
+ spin_unlock_irqrestore(
+ &vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_STOP_ACK);
+ } else {
+ spin_unlock_irqrestore(
+ &vfe40_ctrl->share_ctrl->stop_flag_lock, flags);
+ /* this is from reset command. */
+ vfe40_set_default_reg_values(vfe40_ctrl);
+
+ /* reload all write masters. (frame & line)*/
+ msm_camera_io_w(0x7FFF,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_RESET_ACK);
+ }
+}
+
+static void vfe40_process_camif_sof_irq(
+ struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ if (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_RAW) {
+ if (vfe40_ctrl->start_ack_pending) {
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_START_ACK);
+ vfe40_ctrl->start_ack_pending = FALSE;
+ }
+ vfe40_ctrl->share_ctrl->vfe_capture_count--;
+ /* if last frame to be captured: */
+ if (vfe40_ctrl->share_ctrl->vfe_capture_count == 0) {
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY,
+ vfe40_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
+ }
+ } /* if raw snapshot mode. */
+ if ((vfe40_ctrl->hfr_mode != HFR_MODE_OFF) &&
+ (vfe40_ctrl->share_ctrl->operation_mode ==
+ VFE_MODE_OF_OPERATION_VIDEO) &&
+ (vfe40_ctrl->share_ctrl->vfeFrameId %
+ vfe40_ctrl->hfr_mode != 0)) {
+ vfe40_ctrl->share_ctrl->vfeFrameId++;
+ CDBG("Skip the SOF notification when HFR enabled\n");
+ return;
+ }
+ vfe40_ctrl->share_ctrl->vfeFrameId++;
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId, MSG_ID_SOF_ACK);
+ CDBG("camif_sof_irq, frameId = %d\n",
+ vfe40_ctrl->share_ctrl->vfeFrameId);
+
+ if (vfe40_ctrl->sync_timer_state) {
+ if (vfe40_ctrl->sync_timer_repeat_count == 0)
+ vfe40_sync_timer_stop(vfe40_ctrl);
+ else
+ vfe40_ctrl->sync_timer_repeat_count--;
+ }
+}
+
+static void vfe40_process_error_irq(
+ struct axi_ctrl_t *axi_ctrl, uint32_t errStatus)
+{
+ uint32_t reg_value;
+
+ if (errStatus & VFE40_IMASK_CAMIF_ERROR) {
+ pr_err("vfe40_irq: camif errors\n");
+ reg_value = msm_camera_io_r(
+ axi_ctrl->share_ctrl->vfebase + VFE_CAMIF_STATUS);
+ pr_err("camifStatus = 0x%x\n", reg_value);
+ vfe40_send_isp_msg(&axi_ctrl->subdev,
+ axi_ctrl->share_ctrl->vfeFrameId, MSG_ID_CAMIF_ERROR);
+ }
+
+ if (errStatus & VFE40_IMASK_BHIST_OVWR)
+ pr_err("vfe40_irq: stats bhist overwrite\n");
+
+ if (errStatus & VFE40_IMASK_STATS_CS_OVWR)
+ pr_err("vfe40_irq: stats cs overwrite\n");
+
+ if (errStatus & VFE40_IMASK_STATS_IHIST_OVWR)
+ pr_err("vfe40_irq: stats ihist overwrite\n");
+
+ if (errStatus & VFE40_IMASK_REALIGN_BUF_Y_OVFL)
+ pr_err("vfe40_irq: realign bug Y overflow\n");
+
+ if (errStatus & VFE40_IMASK_REALIGN_BUF_CB_OVFL)
+ pr_err("vfe40_irq: realign bug CB overflow\n");
+
+ if (errStatus & VFE40_IMASK_REALIGN_BUF_CR_OVFL)
+ pr_err("vfe40_irq: realign bug CR overflow\n");
+
+ if (errStatus & VFE40_IMASK_VIOLATION) {
+ pr_err("vfe40_irq: violation interrupt\n");
+ reg_value = msm_camera_io_r(
+ axi_ctrl->share_ctrl->vfebase + VFE_VIOLATION_STATUS);
+ pr_err("%s: violationStatus = 0x%x\n", __func__, reg_value);
+ }
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_0_BUS_OVFL)
+ pr_err("vfe40_irq: image master 0 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_1_BUS_OVFL)
+ pr_err("vfe40_irq: image master 1 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_2_BUS_OVFL)
+ pr_err("vfe40_irq: image master 2 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_3_BUS_OVFL)
+ pr_err("vfe40_irq: image master 3 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_4_BUS_OVFL)
+ pr_err("vfe40_irq: image master 4 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_5_BUS_OVFL)
+ pr_err("vfe40_irq: image master 5 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_IMG_MAST_6_BUS_OVFL)
+ pr_err("vfe40_irq: image master 6 bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_AE_BG_BUS_OVFL)
+ pr_err("vfe40_irq: ae/bg stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_AF_BF_BUS_OVFL)
+ pr_err("vfe40_irq: af/bf stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_AWB_BUS_OVFL)
+ pr_err("vfe40_irq: awb stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_RS_BUS_OVFL)
+ pr_err("vfe40_irq: rs stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_CS_BUS_OVFL)
+ pr_err("vfe40_irq: cs stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_IHIST_BUS_OVFL)
+ pr_err("vfe40_irq: ihist stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_STATS_SKIN_BHIST_BUS_OVFL)
+ pr_err("vfe40_irq: skin/bhist stats bus overflow\n");
+
+ if (errStatus & VFE40_IMASK_AXI_ERROR)
+ pr_err("vfe40_irq: axi error\n");
+}
+
+static uint32_t vfe40_process_stats_irq_common(
+ struct vfe40_ctrl_type *vfe40_ctrl,
+ uint32_t statsNum, uint32_t newAddr)
+{
+ uint32_t pingpongStatus;
+ uint32_t returnAddr;
+ uint32_t pingpongAddr;
+
+ /* must be 0=ping, 1=pong */
+ pingpongStatus =
+ ((msm_camera_io_r(vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_PING_PONG_STATUS))
+ & ((uint32_t)(1<<(statsNum + 7)))) >> (statsNum + 7);
+ /* stats bits starts at 7 */
+ CDBG("statsNum %d, pingpongStatus %d\n", statsNum, pingpongStatus);
+ pingpongAddr =
+ ((uint32_t)(vfe40_ctrl->share_ctrl->vfebase +
+ VFE_BUS_STATS_PING_PONG_BASE)) +
+ (3*statsNum)*4 + (1-pingpongStatus)*4;
+ returnAddr = msm_camera_io_r((uint32_t *)pingpongAddr);
+ msm_camera_io_w(newAddr, (uint32_t *)pingpongAddr);
+ return returnAddr;
+}
+
+static void
+vfe_send_stats_msg(struct vfe40_ctrl_type *vfe40_ctrl,
+ uint32_t bufAddress, uint32_t statsNum)
+{
+ int rc = 0;
+ void *vaddr = NULL;
+ /* fill message with right content. */
+ /* @todo This is causing issues, needs further investigation */
+ /* spin_lock_irqsave(&ctrl->state_lock, flags); */
+ struct isp_msg_stats msgStats;
+ msgStats.frameCounter = vfe40_ctrl->share_ctrl->vfeFrameId;
+ msgStats.buffer = bufAddress;
+
+ switch (statsNum) {
+ case statsAeNum:{
+ msgStats.id = MSG_ID_STATS_AEC;
+ rc = vfe40_ctrl->stats_ops.dispatch(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_AEC, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe40_ctrl->stats_ops.client);
+ }
+ break;
+ case statsAfNum:{
+ msgStats.id = MSG_ID_STATS_AF;
+ rc = vfe40_ctrl->stats_ops.dispatch(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_AF, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe40_ctrl->stats_ops.client);
+ }
+ break;
+ case statsAwbNum: {
+ msgStats.id = MSG_ID_STATS_AWB;
+ rc = vfe40_ctrl->stats_ops.dispatch(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_AWB, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe40_ctrl->stats_ops.client);
+ }
+ break;
+
+ case statsIhistNum: {
+ msgStats.id = MSG_ID_STATS_IHIST;
+ rc = vfe40_ctrl->stats_ops.dispatch(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_IHIST, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe40_ctrl->stats_ops.client);
+ }
+ break;
+ case statsRsNum: {
+ msgStats.id = MSG_ID_STATS_RS;
+ rc = vfe40_ctrl->stats_ops.dispatch(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_RS, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe40_ctrl->stats_ops.client);
+ }
+ break;
+ case statsCsNum: {
+ msgStats.id = MSG_ID_STATS_CS;
+ rc = vfe40_ctrl->stats_ops.dispatch(
+ vfe40_ctrl->stats_ops.stats_ctrl,
+ MSM_STATS_TYPE_CS, bufAddress,
+ &msgStats.buf_idx, &vaddr, &msgStats.fd,
+ vfe40_ctrl->stats_ops.client);
+ }
+ break;
+
+ default:
+ goto stats_done;
+ }
+ if (rc == 0) {
+ msgStats.buffer = (uint32_t)vaddr;
+ v4l2_subdev_notify(&vfe40_ctrl->subdev,
+ NOTIFY_VFE_MSG_STATS,
+ &msgStats);
+ } else {
+ pr_err("%s: paddr to idx mapping error, stats_id = %d, paddr = 0x%d",
+ __func__, msgStats.id, msgStats.buffer);
+ }
+stats_done:
+ /* spin_unlock_irqrestore(&ctrl->state_lock, flags); */
+ return;
+}
+
+static void vfe_send_comp_stats_msg(
+ struct vfe40_ctrl_type *vfe40_ctrl, uint32_t status_bits)
+{
+ struct msm_stats_buf msgStats;
+ uint32_t temp;
+
+ msgStats.frame_id = vfe40_ctrl->share_ctrl->vfeFrameId;
+ msgStats.status_bits = status_bits;
+
+ msgStats.aec.buff = vfe40_ctrl->aecStatsControl.bufToRender;
+ msgStats.awb.buff = vfe40_ctrl->awbStatsControl.bufToRender;
+ msgStats.af.buff = vfe40_ctrl->afStatsControl.bufToRender;
+
+ msgStats.ihist.buff = vfe40_ctrl->ihistStatsControl.bufToRender;
+ msgStats.rs.buff = vfe40_ctrl->rsStatsControl.bufToRender;
+ msgStats.cs.buff = vfe40_ctrl->csStatsControl.bufToRender;
+
+ temp = msm_camera_io_r(
+ vfe40_ctrl->share_ctrl->vfebase + VFE_STATS_AWB_SGW_CFG);
+ msgStats.awb_ymin = (0xFF00 & temp) >> 8;
+
+ v4l2_subdev_notify(&vfe40_ctrl->subdev,
+ NOTIFY_VFE_MSG_COMP_STATS,
+ &msgStats);
+}
+
+static void vfe40_process_stats_awb_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+ uint32_t addr;
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_AWB);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (addr) {
+ vfe40_ctrl->awbStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(vfe40_ctrl, statsAwbNum,
+ addr);
+
+ vfe_send_stats_msg(vfe40_ctrl,
+ vfe40_ctrl->awbStatsControl.bufToRender, statsAwbNum);
+ } else {
+ vfe40_ctrl->awbStatsControl.droppedStatsFrameCount++;
+ CDBG("%s: droppedStatsFrameCount = %d", __func__,
+ vfe40_ctrl->awbStatsControl.droppedStatsFrameCount);
+ }
+}
+
+static void vfe40_process_stats_ihist_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+ uint32_t addr;
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_IHIST);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (addr) {
+ vfe40_ctrl->ihistStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(
+ vfe40_ctrl, statsIhistNum, addr);
+
+ vfe_send_stats_msg(vfe40_ctrl,
+ vfe40_ctrl->ihistStatsControl.bufToRender,
+ statsIhistNum);
+ } else {
+ vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+ CDBG("%s: droppedStatsFrameCount = %d", __func__,
+ vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount);
+ }
+}
+
+static void vfe40_process_stats_rs_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+ uint32_t addr;
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_RS);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (addr) {
+ vfe40_ctrl->rsStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(vfe40_ctrl, statsRsNum,
+ addr);
+
+ vfe_send_stats_msg(vfe40_ctrl,
+ vfe40_ctrl->rsStatsControl.bufToRender, statsRsNum);
+ } else {
+ vfe40_ctrl->rsStatsControl.droppedStatsFrameCount++;
+ CDBG("%s: droppedStatsFrameCount = %d", __func__,
+ vfe40_ctrl->rsStatsControl.droppedStatsFrameCount);
+ }
+}
+
+static void vfe40_process_stats_cs_irq(struct vfe40_ctrl_type *vfe40_ctrl)
+{
+ unsigned long flags;
+ uint32_t addr;
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl, MSM_STATS_TYPE_CS);
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (addr) {
+ vfe40_ctrl->csStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(vfe40_ctrl, statsCsNum,
+ addr);
+
+ vfe_send_stats_msg(vfe40_ctrl,
+ vfe40_ctrl->csStatsControl.bufToRender, statsCsNum);
+ } else {
+ vfe40_ctrl->csStatsControl.droppedStatsFrameCount++;
+ CDBG("%s: droppedStatsFrameCount = %d", __func__,
+ vfe40_ctrl->csStatsControl.droppedStatsFrameCount);
+ }
+}
+
+static void vfe40_process_stats(struct vfe40_ctrl_type *vfe40_ctrl,
+ uint32_t status_bits)
+{
+ unsigned long flags;
+ int32_t process_stats = false;
+ uint32_t addr;
+
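+ /*
+ * Composite stats handling: dequeue a buffer for every stats stream
+ * flagged in status_bits and report them in one combined message.
+ */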
+ CDBG("%s, stats = 0x%x\n", __func__, status_bits);
+ spin_lock_irqsave(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+ MSM_STATS_TYPE_AWB);
+ if (addr) {
+ vfe40_ctrl->awbStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(
+ vfe40_ctrl, statsAwbNum,
+ addr);
+ process_stats = true;
+ } else {
+ vfe40_ctrl->awbStatsControl.droppedStatsFrameCount++;
+ vfe40_ctrl->awbStatsControl.bufToRender = 0;
+ }
+ } else {
+ vfe40_ctrl->awbStatsControl.bufToRender = 0;
+ }
+
+ if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+ MSM_STATS_TYPE_IHIST);
+ if (addr) {
+ vfe40_ctrl->ihistStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(
+ vfe40_ctrl, statsIhistNum,
+ addr);
+ process_stats = true;
+ } else {
+ vfe40_ctrl->ihistStatsControl.droppedStatsFrameCount++;
+ vfe40_ctrl->ihistStatsControl.bufToRender = 0;
+ }
+ } else {
+ vfe40_ctrl->ihistStatsControl.bufToRender = 0;
+ }
+
+ if (status_bits & VFE_IRQ_STATUS0_STATS_RS) {
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+ MSM_STATS_TYPE_RS);
+ if (addr) {
+ vfe40_ctrl->rsStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(
+ vfe40_ctrl, statsRsNum,
+ addr);
+ process_stats = true;
+ } else {
+ vfe40_ctrl->rsStatsControl.droppedStatsFrameCount++;
+ vfe40_ctrl->rsStatsControl.bufToRender = 0;
+ }
+ } else {
+ vfe40_ctrl->rsStatsControl.bufToRender = 0;
+ }
+
+ if (status_bits & VFE_IRQ_STATUS0_STATS_CS) {
+ addr = (uint32_t)vfe40_stats_dqbuf(vfe40_ctrl,
+ MSM_STATS_TYPE_CS);
+ if (addr) {
+ vfe40_ctrl->csStatsControl.bufToRender =
+ vfe40_process_stats_irq_common(
+ vfe40_ctrl, statsCsNum,
+ addr);
+ process_stats = true;
+ } else {
+ vfe40_ctrl->csStatsControl.droppedStatsFrameCount++;
+ vfe40_ctrl->csStatsControl.bufToRender = 0;
+ }
+ } else {
+ vfe40_ctrl->csStatsControl.bufToRender = 0;
+ }
+ spin_unlock_irqrestore(&vfe40_ctrl->stats_bufq_lock, flags);
+ if (process_stats)
+ vfe_send_comp_stats_msg(vfe40_ctrl, status_bits);
+
+ return;
+}
+
+static void vfe40_process_stats_irq(
+ struct vfe40_ctrl_type *vfe40_ctrl, uint32_t irqstatus)
+{
+ uint32_t status_bits = VFE_COM_STATUS & irqstatus;
+
+ if ((vfe40_ctrl->hfr_mode != HFR_MODE_OFF) &&
+ (vfe40_ctrl->share_ctrl->vfeFrameId %
+ vfe40_ctrl->hfr_mode != 0)) {
+ CDBG("Skip the stats when HFR enabled\n");
+ return;
+ }
+
+ vfe40_process_stats(vfe40_ctrl, status_bits);
+ return;
+}
+
+static void vfe40_process_irq(
+ struct vfe40_ctrl_type *vfe40_ctrl, uint32_t irqstatus)
+{
+ if (irqstatus &
+ VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0) {
+ vfe40_process_stats_irq(vfe40_ctrl, irqstatus);
+ return;
+ }
+
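+ /* The stats-composite case was handled above; every other
+ * notification posted by the tasklet carries a single IRQ mask. */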
+ switch (irqstatus) {
+ case VFE_IRQ_STATUS0_CAMIF_SOF_MASK:
+ CDBG("irq camifSofIrq\n");
+ vfe40_process_camif_sof_irq(vfe40_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_REG_UPDATE_MASK:
+ CDBG("irq regUpdateIrq\n");
+ vfe40_process_reg_update_irq(vfe40_ctrl);
+ break;
+ case VFE_IMASK_WHILE_STOPPING_0:
+ CDBG("irq resetAckIrq\n");
+ vfe40_process_reset_irq(vfe40_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_STATS_AWB:
+ CDBG("Stats AWB irq occured.\n");
+ vfe40_process_stats_awb_irq(vfe40_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_STATS_IHIST:
+ CDBG("Stats IHIST irq occured.\n");
+ vfe40_process_stats_ihist_irq(vfe40_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_STATS_RS:
+ CDBG("Stats RS irq occured.\n");
+ vfe40_process_stats_rs_irq(vfe40_ctrl);
+ break;
+ case VFE_IRQ_STATUS0_STATS_CS:
+ CDBG("Stats CS irq occured.\n");
+ vfe40_process_stats_cs_irq(vfe40_ctrl);
+ break;
+ case VFE_IRQ_STATUS1_SYNC_TIMER0:
+ CDBG("SYNC_TIMER 0 irq occured.\n");
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_SYNC_TIMER0_DONE);
+ break;
+ case VFE_IRQ_STATUS1_SYNC_TIMER1:
+ CDBG("SYNC_TIMER 1 irq occured.\n");
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_SYNC_TIMER1_DONE);
+ break;
+ case VFE_IRQ_STATUS1_SYNC_TIMER2:
+ CDBG("SYNC_TIMER 2 irq occured.\n");
+ vfe40_send_isp_msg(&vfe40_ctrl->subdev,
+ vfe40_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_SYNC_TIMER2_DONE);
+ break;
+ default:
+ pr_err("Invalid IRQ status\n");
+ }
+}
+
+static void axi40_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ struct axi_ctrl_t *axi_ctrl = (struct axi_ctrl_t *)data;
+ struct vfe40_isr_queue_cmd *qcmd = NULL;
+
+ CDBG("=== axi40_do_tasklet start ===\n");
+
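+ /* Drain the queue of IRQ status snapshots captured by the ISR. */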
+ while (atomic_read(&axi_ctrl->share_ctrl->irq_cnt)) {
+ spin_lock_irqsave(&axi_ctrl->tasklet_lock, flags);
+ qcmd = list_first_entry(&axi_ctrl->tasklet_q,
+ struct vfe40_isr_queue_cmd, list);
+ atomic_sub(1, &axi_ctrl->share_ctrl->irq_cnt);
+
+ if (!qcmd) {
+ spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
+ flags);
+ return;
+ }
+
+ list_del(&qcmd->list);
+ spin_unlock_irqrestore(&axi_ctrl->tasklet_lock,
+ flags);
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_CAMIF_SOF_MASK)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_CAMIF_SOF_MASK);
+
+ /* interrupt to be processed, *qcmd has the payload. */
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_REG_UPDATE_MASK) {
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_REG_UPDATE_MASK);
+ }
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IMASK_WHILE_STOPPING_0)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IMASK_WHILE_STOPPING_0);
+
+ if (atomic_read(&axi_ctrl->share_ctrl->vstate)) {
+ if (qcmd->vfeInterruptStatus1 &
+ VFE40_IMASK_ERROR_ONLY_1) {
+ pr_err("irq errorIrq\n");
+ vfe40_process_error_irq(
+ axi_ctrl,
+ qcmd->vfeInterruptStatus1 &
+ VFE40_IMASK_ERROR_ONLY_1);
+ }
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_AXI_IRQ,
+ (void *)qcmd->vfeInterruptStatus0);
+
+ /* then process stats irq. */
+ if (axi_ctrl->share_ctrl->stats_comp) {
+ /* process stats comb interrupt. */
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0) {
+ CDBG("Stats composite irq occured.\n");
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)qcmd->vfeInterruptStatus0);
+ }
+ } else {
+ /* process individual stats interrupt. */
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_AWB)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_STATS_AWB);
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_IHIST)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_STATS_IHIST);
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_RS)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_STATS_RS);
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS0_STATS_CS)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS0_STATS_CS);
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS1_SYNC_TIMER0)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS1_SYNC_TIMER0);
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS1_SYNC_TIMER1)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS1_SYNC_TIMER1);
+
+ if (qcmd->vfeInterruptStatus0 &
+ VFE_IRQ_STATUS1_SYNC_TIMER2)
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_IRQ,
+ (void *)VFE_IRQ_STATUS1_SYNC_TIMER2);
+ }
+ }
+ kfree(qcmd);
+ }
+ CDBG("=== axi40_do_tasklet end ===\n");
+}
+
+static irqreturn_t vfe40_parse_irq(int irq_num, void *data)
+{
+ unsigned long flags;
+ struct vfe40_irq_status irq;
+ struct vfe40_isr_queue_cmd *qcmd;
+ struct axi_ctrl_t *axi_ctrl = data;
+
+ CDBG("vfe_parse_irq\n");
+
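+ /* Read both IRQ status registers and clear the pending bits. */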
+ vfe40_read_irq_status(axi_ctrl, &irq);
+
+ if ((irq.vfeIrqStatus0 == 0) && (irq.vfeIrqStatus1 == 0)) {
+ CDBG("vfe_parse_irq: vfeIrqStatus0 & 1 are both 0!\n");
+ return IRQ_HANDLED;
+ }
+
+ qcmd = kzalloc(sizeof(struct vfe40_isr_queue_cmd),
+ GFP_ATOMIC);
+ if (!qcmd) {
+ pr_err("vfe_parse_irq: qcmd malloc failed!\n");
+ return IRQ_HANDLED;
+ }
+
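+ /* While a stop ack is pending, only the stop/reset IRQ bits matter. */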
+ spin_lock_irqsave(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+ if (axi_ctrl->share_ctrl->stop_ack_pending) {
+ irq.vfeIrqStatus0 &= VFE_IMASK_WHILE_STOPPING_0;
+ irq.vfeIrqStatus1 &= VFE_IMASK_WHILE_STOPPING_1;
+ }
+ spin_unlock_irqrestore(&axi_ctrl->share_ctrl->stop_flag_lock, flags);
+
+ CDBG("vfe_parse_irq: Irq_status0 = 0x%x, Irq_status1 = 0x%x.\n",
+ irq.vfeIrqStatus0, irq.vfeIrqStatus1);
+
+ qcmd->vfeInterruptStatus0 = irq.vfeIrqStatus0;
+ qcmd->vfeInterruptStatus1 = irq.vfeIrqStatus1;
+
+ spin_lock_irqsave(&axi_ctrl->tasklet_lock, flags);
+ list_add_tail(&qcmd->list, &axi_ctrl->tasklet_q);
+
+ atomic_add(1, &axi_ctrl->share_ctrl->irq_cnt);
+ spin_unlock_irqrestore(&axi_ctrl->tasklet_lock, flags);
+ tasklet_schedule(&axi_ctrl->vfe40_tasklet);
+ return IRQ_HANDLED;
+}
+
+
+static long vfe_stats_bufq_sub_ioctl(
+ struct vfe40_ctrl_type *vfe_ctrl,
+ struct msm_vfe_cfg_cmd *cmd, void *ion_client)
+{
+ long rc = 0;
+ switch (cmd->cmd_type) {
+ case VFE_CMD_STATS_REQBUF:
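+ /* The first REQBUF lazily initializes the stats buffer ops. */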
+ if (!vfe_ctrl->stats_ops.stats_ctrl) {
+ /* stats_ctrl has not been init yet */
+ rc = msm_stats_buf_ops_init(&vfe_ctrl->stats_ctrl,
+ (struct ion_client *)ion_client,
+ &vfe_ctrl->stats_ops);
+ if (rc < 0) {
+ pr_err("%s: cannot init stats ops", __func__);
+ goto end;
+ }
+ rc = vfe_ctrl->stats_ops.stats_ctrl_init(&vfe_ctrl->stats_ctrl);
+ if (rc < 0) {
+ pr_err("%s: cannot init stats_ctrl ops", __func__);
+ memset(&vfe_ctrl->stats_ops, 0,
+ sizeof(vfe_ctrl->stats_ops));
+ goto end;
+ }
+ if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+ /* error: the length does not match */
+ pr_err("%s: stats reqbuf input size = %d,\n"
+ "struct size = %d, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_reqbuf));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ rc = vfe_ctrl->stats_ops.reqbuf(
+ &vfe_ctrl->stats_ctrl,
+ (struct msm_stats_reqbuf *)cmd->value,
+ vfe_ctrl->stats_ops.client);
+ break;
+ case VFE_CMD_STATS_ENQUEUEBUF:
+ if (sizeof(struct msm_stats_buf_info) != cmd->length) {
+ /* error: the length does not match */
+ pr_err("%s: stats enqueuebuf input size = %d,\n"
+ "struct size = %d, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_buf_info));
+ rc = -EINVAL;
+ goto end;
+ }
+ rc = vfe_ctrl->stats_ops.enqueue_buf(
+ &vfe_ctrl->stats_ctrl,
+ (struct msm_stats_buf_info *)cmd->value,
+ vfe_ctrl->stats_ops.client);
+ break;
+ case VFE_CMD_STATS_FLUSH_BUFQ:
+ {
+ struct msm_stats_flush_bufq *flush_req = NULL;
+ flush_req = (struct msm_stats_flush_bufq *)cmd->value;
+ if (sizeof(struct msm_stats_flush_bufq) != cmd->length) {
+ /* error: the length does not match */
+ pr_err("%s: stats flush queue input size = %d,\n"
+ "struct size = %d, mismatch\n",
+ __func__, cmd->length,
+ sizeof(struct msm_stats_flush_bufq));
+ rc = -EINVAL;
+ goto end;
+ }
+ rc = vfe_ctrl->stats_ops.bufq_flush(
+ &vfe_ctrl->stats_ctrl,
+ (enum msm_stats_enum_type)flush_req->stats_type,
+ vfe_ctrl->stats_ops.client);
+ }
+ break;
+ default:
+ rc = -1;
+ pr_err("%s: cmd_type %d not supported", __func__,
+ cmd->cmd_type);
+ break;
+ }
+end:
+ return rc;
+}
+
+static long msm_vfe_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int subdev_cmd, void *arg)
+{
+ struct msm_cam_media_controller *pmctl =
+ (struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
+ struct vfe40_ctrl_type *vfe40_ctrl =
+ (struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+ struct msm_isp_cmd vfecmd;
+ struct msm_camvfe_params *vfe_params =
+ (struct msm_camvfe_params *)arg;
+ struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+ void *data = vfe_params->data;
+
+ long rc = 0;
+ struct vfe_cmd_stats_buf *scfg = NULL;
+ struct vfe_cmd_stats_ack *sack = NULL;
+
+ if (!vfe40_ctrl->share_ctrl->vfebase) {
+ pr_err("%s: base address unmapped\n", __func__);
+ return -EFAULT;
+ }
+
+ switch (cmd->cmd_type) {
+ case CMD_VFE_PROCESS_IRQ:
+ vfe40_process_irq(vfe40_ctrl, (uint32_t) data);
+ return rc;
+ case VFE_CMD_STATS_REQBUF:
+ case VFE_CMD_STATS_ENQUEUEBUF:
+ case VFE_CMD_STATS_FLUSH_BUFQ:
+ /* for easy porting put in one envelope */
+ rc = vfe_stats_bufq_sub_ioctl(vfe40_ctrl,
+ cmd, vfe_params->data);
+ return rc;
+ default:
+ if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+ cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+ cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
+ if (copy_from_user(&vfecmd,
+ (void __user *)(cmd->value),
+ sizeof(vfecmd))) {
+ pr_err("%s %d: copy_from_user failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else {
+ /* here it is either a stats release or a frame release. */
+ if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+ cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR) {
+ /* then must be stats release. */
+ if (!data) {
+ pr_err("%s: data = NULL, cmd->cmd_type = %d",
+ __func__, cmd->cmd_type);
+ return -EFAULT;
+ }
+ sack = kmalloc(sizeof(struct vfe_cmd_stats_ack),
+ GFP_ATOMIC);
+ if (!sack) {
+ pr_err("%s: no mem for cmd->cmd_type = %d",
+ __func__, cmd->cmd_type);
+ return -ENOMEM;
+ }
+ sack->nextStatsBuf = *(uint32_t *)data;
+ }
+ }
+ CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+ if ((cmd->cmd_type == CMD_STATS_AF_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AWB_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_RS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_CS_ENABLE) ||
+ (cmd->cmd_type == CMD_STATS_AEC_ENABLE)) {
+ scfg = NULL;
+ /* individual */
+ goto vfe40_config_done;
+ }
+ switch (cmd->cmd_type) {
+ case CMD_GENERAL:
+ rc = vfe40_proc_general(pmctl, &vfecmd, vfe40_ctrl);
+ break;
+ case CMD_CONFIG_PING_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe40_output_ch *outch =
+ vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+ outch->ping = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_PONG_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe40_output_ch *outch =
+ vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+ outch->pong = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_FREE_BUF_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe40_output_ch *outch =
+ vfe40_get_ch(path, vfe40_ctrl->share_ctrl);
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
+ break;
+ case CMD_SNAP_BUF_RELEASE:
+ break;
+ default:
+ pr_err("%s Unsupported AXI configuration %x ", __func__,
+ cmd->cmd_type);
+ break;
+ }
+ }
+vfe40_config_done:
+ kfree(scfg);
+ kfree(sack);
+ CDBG("%s done: rc = %d\n", __func__, (int) rc);
+ return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_vfe_subdev_core_ops = {
+ .ioctl = msm_vfe_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_vfe_subdev_ops = {
+ .core = &msm_vfe_subdev_core_ops,
+};
+
+int msm_vfe_subdev_init(struct v4l2_subdev *sd,
+ struct msm_cam_media_controller *mctl)
+{
+ int rc = 0;
+ struct vfe40_ctrl_type *vfe40_ctrl =
+ (struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+ v4l2_set_subdev_hostdata(sd, mctl);
+
+ spin_lock_init(&vfe40_ctrl->share_ctrl->stop_flag_lock);
+ spin_lock_init(&vfe40_ctrl->state_lock);
+ spin_lock_init(&vfe40_ctrl->io_lock);
+ spin_lock_init(&vfe40_ctrl->update_ack_lock);
+ spin_lock_init(&vfe40_ctrl->stats_bufq_lock);
+
+
+ vfe40_ctrl->update_linear = false;
+ vfe40_ctrl->update_rolloff = false;
+ vfe40_ctrl->update_la = false;
+ vfe40_ctrl->update_gamma = false;
+ vfe40_ctrl->hfr_mode = HFR_MODE_OFF;
+
+ return rc;
+}
+
+void msm_vfe_subdev_release(struct v4l2_subdev *sd)
+{
+ struct vfe40_ctrl_type *vfe40_ctrl =
+ (struct vfe40_ctrl_type *)v4l2_get_subdevdata(sd);
+ if (!vfe40_ctrl->share_ctrl->vfebase)
+ vfe40_ctrl->share_ctrl->vfebase = NULL;
+}
+
+static const struct v4l2_subdev_internal_ops msm_vfe_internal_ops;
+
+static int __devinit vfe40_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct axi_ctrl_t *axi_ctrl;
+ struct vfe40_ctrl_type *vfe40_ctrl;
+ struct vfe_share_ctrl_t *share_ctrl;
+ struct msm_cam_subdev_info sd_info;
+ CDBG("%s: device id = %d\n", __func__, pdev->id);
+
+ share_ctrl = kzalloc(sizeof(struct vfe_share_ctrl_t), GFP_KERNEL);
+ if (!share_ctrl) {
+ pr_err("%s: no enough memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ axi_ctrl = kzalloc(sizeof(struct axi_ctrl_t), GFP_KERNEL);
+ if (!axi_ctrl) {
+ pr_err("%s: no enough memory\n", __func__);
+ kfree(share_ctrl);
+ return -ENOMEM;
+ }
+
+ vfe40_ctrl = kzalloc(sizeof(struct vfe40_ctrl_type), GFP_KERNEL);
+ if (!vfe40_ctrl) {
+ pr_err("%s: no enough memory\n", __func__);
+ kfree(share_ctrl);
+ kfree(axi_ctrl);
+ return -ENOMEM;
+ }
+
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
+ share_ctrl->axi_ctrl = axi_ctrl;
+ share_ctrl->vfe40_ctrl = vfe40_ctrl;
+ axi_ctrl->share_ctrl = share_ctrl;
+ vfe40_ctrl->share_ctrl = share_ctrl;
+ axi_ctrl->pdev = pdev;
+ vfe40_axi_probe(axi_ctrl);
+
+ v4l2_subdev_init(&vfe40_ctrl->subdev, &msm_vfe_subdev_ops);
+ vfe40_ctrl->subdev.internal_ops = &msm_vfe_internal_ops;
+ vfe40_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(vfe40_ctrl->subdev.name,
+ sizeof(vfe40_ctrl->subdev.name), "vfe4.0");
+ v4l2_set_subdevdata(&vfe40_ctrl->subdev, vfe40_ctrl);
+ platform_set_drvdata(pdev, &vfe40_ctrl->subdev);
+
+ axi_ctrl->vfemem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "vfe");
+ if (!axi_ctrl->vfemem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe40_no_resource;
+ }
+ axi_ctrl->vfeirq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vfe");
+ if (!axi_ctrl->vfeirq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe40_no_resource;
+ }
+
+ axi_ctrl->vfeio = request_mem_region(axi_ctrl->vfemem->start,
+ resource_size(axi_ctrl->vfemem), pdev->name);
+ if (!axi_ctrl->vfeio) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto vfe40_no_resource;
+ }
+
+ rc = request_irq(axi_ctrl->vfeirq->start, vfe40_parse_irq,
+ IRQF_TRIGGER_RISING, "vfe", axi_ctrl);
+ if (rc < 0) {
+ release_mem_region(axi_ctrl->vfemem->start,
+ resource_size(axi_ctrl->vfemem));
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto vfe40_no_resource;
+ }
+
+ disable_irq(axi_ctrl->vfeirq->start);
+
+ tasklet_init(&axi_ctrl->vfe40_tasklet,
+ axi40_do_tasklet, (unsigned long)axi_ctrl);
+
+ vfe40_ctrl->pdev = pdev;
+ sd_info.sdev_type = VFE_DEV;
+ sd_info.sd_index = pdev->id;
+ sd_info.irq_num = axi_ctrl->vfeirq->start;
+ msm_cam_register_subdev_node(&vfe40_ctrl->subdev, &sd_info);
+ return 0;
+
+vfe40_no_resource:
+ kfree(vfe40_ctrl);
+ kfree(axi_ctrl);
+ kfree(share_ctrl);
+ return rc;
+}
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+ {.compatible = "qcom,vfe40"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+static struct platform_driver vfe40_driver = {
+ .probe = vfe40_probe,
+ .driver = {
+ .name = MSM_VFE_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe_dt_match,
+ },
+};
+
+static int __init msm_vfe40_init_module(void)
+{
+ return platform_driver_register(&vfe40_driver);
+}
+
+static void __exit msm_vfe40_exit_module(void)
+{
+ platform_driver_unregister(&vfe40_driver);
+}
+
+module_init(msm_vfe40_init_module);
+module_exit(msm_vfe40_exit_module);
+MODULE_DESCRIPTION("VFE 4.0 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/vfe/msm_vfe40.h b/drivers/media/video/msm/vfe/msm_vfe40.h
new file mode 100644
index 0000000..c8b0cb8
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40.h
@@ -0,0 +1,1202 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE40_H__
+#define __MSM_VFE40_H__
+
+#include <linux/bitops.h>
+#include "msm_vfe_stats_buf.h"
+
+#define TRUE 1
+#define FALSE 0
+
+#define VFE40_HW_NUMBER 0x10000015
+
+/* This defines the total number of registers in the VFE.
+ * Each register is 4 bytes, so multiply this number by 4
+ * to get the address range. */
+#define VFE40_REGISTER_TOTAL 0x00000320
+
+/* At stop of the VFE pipeline it is assumed for now that
+ * CAMIF may stop at any time. Bits 1:0 = binary 10:
+ * disable image data capture immediately. */
+#define CAMIF_COMMAND_STOP_IMMEDIATELY 0x00000002
+
+/* At stop of the VFE pipeline it is assumed for now that
+ * CAMIF may stop at any time. Bits 1:0 = binary 00:
+ * disable image data capture at the frame boundary. */
+#define CAMIF_COMMAND_STOP_AT_FRAME_BOUNDARY 0x00000000
+
+/* to halt axi bridge */
+#define AXI_HALT 0x00000001
+
+/* clear the halt bit. */
+#define AXI_HALT_CLEAR 0x00000000
+
+/* reset the pipeline when the stop command is issued
+ * (without resetting the registers). Bits 26-31 = 0 for
+ * domain reset, bits 0-9 = 1 for module reset, except
+ * the register module. */
+#define VFE_RESET_UPON_STOP_CMD 0x000003ef
+
+/* reset the pipeline on the reset command.
+ * Bits 26-31 = 0 for domain reset, bits 0-9 = 1 for module reset. */
+#define VFE_RESET_UPON_RESET_CMD 0x000001ff
+
+/* constants for irq registers */
+#define VFE_DISABLE_ALL_IRQS 0
+/* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS. */
+#define VFE_CLEAR_ALL_IRQ0 0xffff7fff
+#define VFE_CLEAR_ALL_IRQ1 0xffffffff
+
+#define VFE_IRQ_STATUS0_CAMIF_SOF_MASK (0x00000001<<0)
+#define VFE_IRQ_STATUS0_REG_UPDATE_MASK (0x00000001<<4)
+#define VFE_IRQ_STATUS0_STATS_BE (0x00000001<<16)
+#define VFE_IRQ_STATUS0_STATS_BG (0x00000001<<17)
+#define VFE_IRQ_STATUS0_STATS_BF (0x00000001<<18)
+#define VFE_IRQ_STATUS0_STATS_AWB (0x00000001<<19)
+#define VFE_IRQ_STATUS0_STATS_RS (0x00000001<<20)
+#define VFE_IRQ_STATUS0_STATS_CS (0x00000001<<21)
+#define VFE_IRQ_STATUS0_STATS_IHIST (0x00000001<<22)
+#define VFE_IRQ_STATUS0_STATS_SKIN_BHIST (0x00000001<<23)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK (0x00000001<<25)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK (0x00000001<<26)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE2_MASK (0x00000001<<27)
+#define VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE3_MASK (0x00000001<<28)
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_0 (0x00000001<<29)
+#define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK_1 (0x00000001<<30)
+#define VFE_IRQ_STATUS0_RESET_AXI_HALT_ACK_MASK (0x00000001<<31)
+
+#define VFE_IRQ_STATUS1_SYNC_TIMER0 (0x00000001<<25)
+#define VFE_IRQ_STATUS1_SYNC_TIMER1 (0x00000001<<26)
+#define VFE_IRQ_STATUS1_SYNC_TIMER2 (0x00000001<<27)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER0 (0x00000001<<28)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER1 (0x00000001<<29)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER2 (0x00000001<<30)
+#define VFE_IRQ_STATUS1_ASYNC_TIMER3 (0x00000001<<31)
+
+/* imask used while waiting for a stop ack: the driver has already
+ * requested stop and is waiting for the reset irq and async timer irq.
+ * For irq_status_0, bits 28-31 are for the async timers. For
+ * irq_status_1, bit 22 is the reset irq and bit 23 the axi_halt_ack
+ * irq. */
+#define VFE_IMASK_WHILE_STOPPING_0 0x80000000
+#define VFE_IMASK_WHILE_STOPPING_1 0x00000100
+
+/* For ABF bit 4 is set to zero and the others to 1 */
+#define ABF_MASK 0xFFFFFFF7
+
+/* For DBPC bit 0 is set to zero and the others to 1 */
+#define DBPC_MASK 0xFFFFFFFE
+
+/* For DBCC bit 1 is set to zero and the others to 1 */
+#define DBCC_MASK 0xFFFFFFFD
+
+/* For DBPC/ABF/DBCC/ABCC the bits are set to 1, all others to 0 */
+#define DEMOSAIC_MASK 0xF
+
+/* For MCE enable bit 28 is set to zero and the others to 1 */
+#define MCE_EN_MASK 0xEFFFFFFF
+
+/* For MCE Q_K bits 28 to 31 are set to zero and the others to 1 */
+#define MCE_Q_K_MASK 0x0FFFFFFF
+
+#define BE_ENABLE_MASK (0x00000001<<5)
+#define BG_ENABLE_MASK (0x00000001<<6)
+#define BF_ENABLE_MASK (0x00000001<<7)
+#define AWB_ENABLE_MASK (0x00000001<<8)
+#define RS_ENABLE_MASK (0x00000001<<9)
+#define CS_ENABLE_MASK (0x00000001<<10)
+#define CLF_ENABLE_MASK (0x00000001<<12)
+#define IHIST_ENABLE_MASK (0x00000001<<15)
+#define RS_CS_ENABLE_MASK (RS_ENABLE_MASK|CS_ENABLE_MASK)
+#define STATS_ENABLE_MASK 0x000487E0 /* bit 18,15,10,9,8,7,6,5*/
+
+#define VFE_DMI_CFG_DEFAULT 0x00000100
+
+#define HFR_MODE_OFF 1
+#define VFE_FRAME_SKIP_PERIOD_MASK 0x0000001F /*bits 0 -4*/
+
+enum VFE40_DMI_RAM_SEL {
+ NO_MEM_SELECTED = 0,
+ BLACK_LUT_RAM_BANK0 = 0x1,
+ BLACK_LUT_RAM_BANK1 = 0x2,
+ ROLLOFF_RAM0_BANK0 = 0x3,
+ ROLLOFF_RAM0_BANK1 = 0x4,
+ DEMOSAIC_LUT_RAM_BANK0 = 0x5,
+ DEMOSAIC_LUT_RAM_BANK1 = 0x6,
+ STATS_BHIST_RAM0 = 0x7,
+ STATS_BHIST_RAM1 = 0x8,
+ RGBLUT_RAM_CH0_BANK0 = 0x9,
+ RGBLUT_RAM_CH0_BANK1 = 0xa,
+ RGBLUT_RAM_CH1_BANK0 = 0xb,
+ RGBLUT_RAM_CH1_BANK1 = 0xc,
+ RGBLUT_RAM_CH2_BANK0 = 0xd,
+ RGBLUT_RAM_CH2_BANK1 = 0xe,
+ RGBLUT_CHX_BANK0 = 0xf,
+ RGBLUT_CHX_BANK1 = 0x10,
+ STATS_IHIST_RAM = 0x11,
+ LUMA_ADAPT_LUT_RAM_BANK0 = 0x12,
+ LUMA_ADAPT_LUT_RAM_BANK1 = 0x13,
+};
+
+enum vfe_output_state {
+ VFE_STATE_IDLE,
+ VFE_STATE_START_REQUESTED,
+ VFE_STATE_STARTED,
+ VFE_STATE_STOP_REQUESTED,
+ VFE_STATE_STOPPED,
+};
+
+#define V40_CAMIF_OFF 0x000002F8
+#define V40_CAMIF_LEN 36
+
+#define V40_DEMUX_OFF 0x00000424
+#define V40_DEMUX_LEN 28
+
+#define V40_DEMOSAICV3_0_OFF 0x00000440
+#define V40_DEMOSAICV3_0_LEN 4
+#define V40_DEMOSAICV3_1_OFF 0x00000518
+#define V40_DEMOSAICV3_1_LEN 88
+#define V40_DEMOSAICV3_2_OFF 0x00000568
+#define V40_DEMOSAICV3_UP_REG_CNT 5
+
+#define V40_OUT_CLAMP_OFF 0x00000874
+#define V40_OUT_CLAMP_LEN 16
+
+#define V40_OPERATION_CFG_LEN 44
+
+#define V40_AXI_OUT_OFF 0x0000004C
+#define V40_AXI_OUT_LEN 412
+#define V40_AXI_CH_INF_LEN 32
+#define V40_AXI_CFG_LEN 71
+
+#define V40_FOV_ENC_OFF 0x00000854
+#define V40_FOV_ENC_LEN 16
+#define V40_FOV_VIEW_OFF 0x00000864
+#define V40_FOV_VIEW_LEN 16
+
+#define V40_SCALER_ENC_OFF 0x0000075C
+#define V40_SCALER_ENC_LEN 72
+
+#define V40_SCALER_VIEW_OFF 0x000007A4
+#define V40_SCALER_VIEW_LEN 72
+
+#define V40_COLORXFORM_ENC_CFG_OFF 0x0000071C
+#define V40_COLORXFORM_ENC_CFG_LEN 32
+
+#define V40_COLORXFORM_VIEW_CFG_OFF 0x0000073C
+#define V40_COLORXFORM_VIEW_CFG_LEN 32
+
+#define V40_CHROMA_EN_OFF 0x00000640
+#define V40_CHROMA_EN_LEN 36
+
+#define V40_SYNC_TIMER_OFF 0x00000324
+#define V40_SYNC_TIMER_POLARITY_OFF 0x0000034C
+#define V40_TIMER_SELECT_OFF 0x00000374
+#define V40_SYNC_TIMER_LEN 28
+
+#define V40_ASYNC_TIMER_OFF 0x00000350
+#define V40_ASYNC_TIMER_LEN 28
+
+/* use 10x13 mesh table in vfe40*/
+#define V40_MESH_ROLL_OFF_CFG_OFF 0x00000400
+#define V40_MESH_ROLL_OFF_CFG_LEN 36
+#define V40_MESH_ROLL_OFF_TABLE_SIZE 130
+
+
+#define V40_COLOR_COR_OFF 0x000005D0
+#define V40_COLOR_COR_LEN 52
+
+#define V40_WB_OFF 0x00000580
+#define V40_WB_LEN 4
+
+#define V40_RGB_G_OFF 0x00000638
+#define V40_RGB_G_LEN 4
+#define V40_GAMMA_LUT_BANK_SEL_MASK 0x00000007
+
+#define V40_LA_OFF 0x0000063C
+#define V40_LA_LEN 4
+
+#define V40_SCE_OFF 0x00000694
+#define V40_SCE_LEN 136
+
+#define V40_CHROMA_SUP_OFF 0x00000664
+#define V40_CHROMA_SUP_LEN 12
+
+#define V40_MCE_OFF 0x00000670
+#define V40_MCE_LEN 36
+
+#define V40_STATS_BE_OFF 0x0000088C
+#define V40_STATS_BE_LEN 12
+
+#define V40_STATS_BG_OFF 0x00000898
+#define V40_STATS_BG_LEN 12
+
+#define V40_STATS_BF_OFF 0x000008A4
+#define V40_STATS_BF_LEN 24
+
+#define V40_STATS_BHIST_OFF 0x000008BC
+#define V40_STATS_BHIST_LEN 8
+
+#define V40_STATS_AWB_OFF 0x000008C4
+#define V40_STATS_AWB_LEN 32
+
+#define V40_STATS_RS_OFF 0x000008E4
+#define V40_STATS_RS_LEN 8
+
+#define V40_STATS_CS_OFF 0x000008EC
+#define V40_STATS_CS_LEN 8
+
+#define V40_STATS_IHIST_OFF 0x000008F4
+#define V40_STATS_IHIST_LEN 8
+
+#define V40_STATS_SKIN_OFF 0x000008FC
+#define V40_STATS_SKIN_LEN 20
+
+#define V40_ASF_OFF 0x000007EC
+#define V40_ASF_LEN 48
+#define V40_ASF_UPDATE_LEN 36
+
+#define V40_CAPTURE_LEN 4
+
+#define V40_GET_HW_VERSION_OFF 0
+#define V40_GET_HW_VERSION_LEN 4
+
+#define V40_LINEARIZATION_OFF1 0x0000037C
+#define V40_LINEARIZATION_LEN1 68
+
+#define V40_DEMOSAICV3_DBPC_CFG_OFF 0x00000444
+#define V40_DEMOSAICV3_DBPC_LEN 4
+
+#define V40_DEMOSAICV3_DBPC_CFG_OFF0 0x00000448
+#define V40_DEMOSAICV3_DBPC_CFG_OFF1 0x0000044C
+#define V40_DEMOSAICV3_DBPC_CFG_OFF2 0x00000450
+
+#define V40_DEMOSAICV3_DBCC_OFF 0x00000454
+#define V40_DEMOSAICV3_DBCC_LEN 16
+
+#define V40_DEMOSAICV3_ABF_OFF 0x00000464
+#define V40_DEMOSAICV3_ABF_LEN 180
+
+#define V40_MODULE_CFG_OFF 0x00000018
+#define V40_MODULE_CFG_LEN 4
+
+#define V40_ASF_SPECIAL_EFX_CFG_OFF 0x0000081C
+#define V40_ASF_SPECIAL_EFX_CFG_LEN 4
+
+#define V40_CLF_CFG_OFF 0x00000588
+#define V40_CLF_CFG_LEN 72
+
+#define V40_CLF_LUMA_UPDATE_OFF 0x0000058C
+#define V40_CLF_LUMA_UPDATE_LEN 60
+
+#define V40_CLF_CHROMA_UPDATE_OFF 0x000005C8
+#define V40_CLF_CHROMA_UPDATE_LEN 8
+
+#define VFE40_GAMMA_NUM_ENTRIES 64
+
+#define VFE40_LA_TABLE_LENGTH 64
+
+#define VFE40_LINEARIZATON_TABLE_LENGTH 36
+
+#define VFE_WM_CFG_BASE 0x0070
+#define VFE_WM_CFG_LEN 0x0024
+
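+/*
+ * Ping/pong address helpers: bit <chn> of the ping-pong status register
+ * selects whether the ping or pong buffer address of write master <chn>
+ * is read or updated.
+ */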
+#define vfe40_get_ch_ping_addr(base, chn) \
+ (msm_camera_io_r((base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn)))
+#define vfe40_get_ch_pong_addr(base, chn) \
+ (msm_camera_io_r((base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn) + 4))
+#define vfe40_get_ch_addr(ping_pong, base, chn) \
+ ((((ping_pong) & (1 << (chn))) == 0) ? \
+ (vfe40_get_ch_pong_addr((base), chn)) : \
+ (vfe40_get_ch_ping_addr((base), chn)))
+
+#define vfe40_put_ch_ping_addr(base, chn, addr) \
+ (msm_camera_io_w((addr), \
+ (base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn)))
+#define vfe40_put_ch_pong_addr(base, chn, addr) \
+ (msm_camera_io_w((addr), \
+ (base) + VFE_WM_CFG_BASE + VFE_WM_CFG_LEN * (chn) + 4))
+#define vfe40_put_ch_addr(ping_pong, base, chn, addr) \
+ (((ping_pong) & (1 << (chn))) == 0 ? \
+ vfe40_put_ch_pong_addr((base), (chn), (addr)) : \
+ vfe40_put_ch_ping_addr((base), (chn), (addr)))
+
+struct vfe_cmd_hw_version {
+ uint32_t minorVersion;
+ uint32_t majorVersion;
+ uint32_t coreVersion;
+};
+
+enum VFE_AXI_OUTPUT_MODE {
+ VFE_AXI_OUTPUT_MODE_Output1,
+ VFE_AXI_OUTPUT_MODE_Output2,
+ VFE_AXI_OUTPUT_MODE_Output1AndOutput2,
+ VFE_AXI_OUTPUT_MODE_CAMIFToAXIViaOutput2,
+ VFE_AXI_OUTPUT_MODE_Output2AndCAMIFToAXIViaOutput1,
+ VFE_AXI_OUTPUT_MODE_Output1AndCAMIFToAXIViaOutput2,
+ VFE_AXI_LAST_OUTPUT_MODE_ENUM
+};
+
+enum VFE_RAW_WR_PATH_SEL {
+ VFE_RAW_OUTPUT_DISABLED,
+ VFE_RAW_OUTPUT_ENC_CBCR_PATH,
+ VFE_RAW_OUTPUT_VIEW_CBCR_PATH,
+ VFE_RAW_OUTPUT_PATH_INVALID
+};
+
+
+#define VFE_AXI_OUTPUT_BURST_LENGTH 4
+#define VFE_MAX_NUM_FRAGMENTS_PER_FRAME 4
+#define VFE_AXI_OUTPUT_CFG_FRAME_COUNT 3
+
+struct vfe_cmds_per_write_master {
+ uint16_t imageWidth;
+ uint16_t imageHeight;
+ uint16_t outRowCount;
+ uint16_t outRowIncrement;
+ uint32_t outFragments[VFE_AXI_OUTPUT_CFG_FRAME_COUNT]
+ [VFE_MAX_NUM_FRAGMENTS_PER_FRAME];
+};
+
+struct vfe_cmds_axi_per_output_path {
+ uint8_t fragmentCount;
+ struct vfe_cmds_per_write_master firstWM;
+ struct vfe_cmds_per_write_master secondWM;
+};
+
+enum VFE_AXI_BURST_LENGTH {
+ VFE_AXI_BURST_LENGTH_IS_2 = 2,
+ VFE_AXI_BURST_LENGTH_IS_4 = 4,
+ VFE_AXI_BURST_LENGTH_IS_8 = 8,
+ VFE_AXI_BURST_LENGTH_IS_16 = 16
+};
+
+
+struct vfe_cmd_fov_crop_config {
+ uint8_t enable;
+ uint16_t firstPixel;
+ uint16_t lastPixel;
+ uint16_t firstLine;
+ uint16_t lastLine;
+};
+
+struct vfe_cmds_main_scaler_stripe_init {
+ uint16_t MNCounterInit;
+ uint16_t phaseInit;
+};
+
+struct vfe_cmds_scaler_one_dimension {
+ uint8_t enable;
+ uint16_t inputSize;
+ uint16_t outputSize;
+ uint32_t phaseMultiplicationFactor;
+ uint8_t interpolationResolution;
+};
+
+struct vfe_cmd_main_scaler_config {
+ uint8_t enable;
+ struct vfe_cmds_scaler_one_dimension hconfig;
+ struct vfe_cmds_scaler_one_dimension vconfig;
+ struct vfe_cmds_main_scaler_stripe_init MNInitH;
+ struct vfe_cmds_main_scaler_stripe_init MNInitV;
+};
+
+struct vfe_cmd_scaler2_config {
+ uint8_t enable;
+ struct vfe_cmds_scaler_one_dimension hconfig;
+ struct vfe_cmds_scaler_one_dimension vconfig;
+};
+
+
+struct vfe_cmd_frame_skip_update {
+ uint32_t output1Pattern;
+ uint32_t output2Pattern;
+};
+
+struct vfe_cmd_output_clamp_config {
+ uint8_t minCh0;
+ uint8_t minCh1;
+ uint8_t minCh2;
+ uint8_t maxCh0;
+ uint8_t maxCh1;
+ uint8_t maxCh2;
+};
+
+struct vfe_cmd_chroma_subsample_config {
+ uint8_t enable;
+ uint8_t cropEnable;
+ uint8_t vsubSampleEnable;
+ uint8_t hsubSampleEnable;
+ uint8_t vCosited;
+ uint8_t hCosited;
+ uint8_t vCositedPhase;
+ uint8_t hCositedPhase;
+ uint16_t cropWidthFirstPixel;
+ uint16_t cropWidthLastPixel;
+ uint16_t cropHeightFirstLine;
+ uint16_t cropHeightLastLine;
+};
+
+enum VFE_START_PIXEL_PATTERN {
+ VFE_BAYER_RGRGRG,
+ VFE_BAYER_GRGRGR,
+ VFE_BAYER_BGBGBG,
+ VFE_BAYER_GBGBGB,
+ VFE_YUV_YCbYCr,
+ VFE_YUV_YCrYCb,
+ VFE_YUV_CbYCrY,
+ VFE_YUV_CrYCbY
+};
+
+enum VFE_BUS_RD_INPUT_PIXEL_PATTERN {
+ VFE_BAYER_RAW,
+ VFE_YUV_INTERLEAVED,
+ VFE_YUV_PSEUDO_PLANAR_Y,
+ VFE_YUV_PSEUDO_PLANAR_CBCR
+};
+
+enum VFE_YUV_INPUT_COSITING_MODE {
+ VFE_YUV_COSITED,
+ VFE_YUV_INTERPOLATED
+};
+
+struct vfe_cmds_demosaic_abf {
+ uint8_t enable;
+ uint8_t forceOn;
+ uint8_t shift;
+ uint16_t lpThreshold;
+ uint16_t max;
+ uint16_t min;
+ uint8_t ratio;
+};
+
+struct vfe_cmds_demosaic_bpc {
+ uint8_t enable;
+ uint16_t fmaxThreshold;
+ uint16_t fminThreshold;
+ uint16_t redDiffThreshold;
+ uint16_t blueDiffThreshold;
+ uint16_t greenDiffThreshold;
+};
+
+struct vfe_cmd_demosaic_config {
+ uint8_t enable;
+ uint8_t slopeShift;
+ struct vfe_cmds_demosaic_abf abfConfig;
+ struct vfe_cmds_demosaic_bpc bpcConfig;
+};
+
+struct vfe_cmd_demosaic_bpc_update {
+ struct vfe_cmds_demosaic_bpc bpcUpdate;
+};
+
+struct vfe_cmd_demosaic_abf_update {
+ struct vfe_cmds_demosaic_abf abfUpdate;
+};
+
+struct vfe_cmd_white_balance_config {
+ uint8_t enable;
+ uint16_t ch2Gain;
+ uint16_t ch1Gain;
+ uint16_t ch0Gain;
+};
+
+enum VFE_COLOR_CORRECTION_COEF_QFACTOR {
+ COEF_IS_Q7_SIGNED,
+ COEF_IS_Q8_SIGNED,
+ COEF_IS_Q9_SIGNED,
+ COEF_IS_Q10_SIGNED
+};
+
+struct vfe_cmd_color_correction_config {
+ uint8_t enable;
+ enum VFE_COLOR_CORRECTION_COEF_QFACTOR coefQFactor;
+ int16_t C0;
+ int16_t C1;
+ int16_t C2;
+ int16_t C3;
+ int16_t C4;
+ int16_t C5;
+ int16_t C6;
+ int16_t C7;
+ int16_t C8;
+ int16_t K0;
+ int16_t K1;
+ int16_t K2;
+};
+
+#define VFE_LA_TABLE_LENGTH 64
+
+struct vfe_cmd_la_config {
+ uint8_t enable;
+ int16_t table[VFE_LA_TABLE_LENGTH];
+};
+
+#define VFE_GAMMA_TABLE_LENGTH 256
+enum VFE_RGB_GAMMA_TABLE_SELECT {
+ RGB_GAMMA_CH0_SELECTED,
+ RGB_GAMMA_CH1_SELECTED,
+ RGB_GAMMA_CH2_SELECTED,
+ RGB_GAMMA_CH0_CH1_SELECTED,
+ RGB_GAMMA_CH0_CH2_SELECTED,
+ RGB_GAMMA_CH1_CH2_SELECTED,
+ RGB_GAMMA_CH0_CH1_CH2_SELECTED
+};
+
+struct vfe_cmd_rgb_gamma_config {
+ uint8_t enable;
+ enum VFE_RGB_GAMMA_TABLE_SELECT channelSelect;
+ int16_t table[VFE_GAMMA_TABLE_LENGTH];
+};
+
+struct vfe_cmd_chroma_enhan_config {
+ uint8_t enable;
+ int16_t am;
+ int16_t ap;
+ int16_t bm;
+ int16_t bp;
+ int16_t cm;
+ int16_t cp;
+ int16_t dm;
+ int16_t dp;
+ int16_t kcr;
+ int16_t kcb;
+ int16_t RGBtoYConversionV0;
+ int16_t RGBtoYConversionV1;
+ int16_t RGBtoYConversionV2;
+ uint8_t RGBtoYConversionOffset;
+};
+
+struct vfe_cmd_chroma_suppression_config {
+ uint8_t enable;
+ uint8_t m1;
+ uint8_t m3;
+ uint8_t n1;
+ uint8_t n3;
+ uint8_t nn1;
+ uint8_t mm1;
+};
+
+struct vfe_cmd_asf_config {
+ uint8_t enable;
+ uint8_t smoothFilterEnabled;
+ uint8_t sharpMode;
+ uint8_t smoothCoefCenter;
+ uint8_t smoothCoefSurr;
+ uint8_t normalizeFactor;
+ uint8_t sharpK1;
+ uint8_t sharpK2;
+ uint8_t sharpThreshE1;
+ int8_t sharpThreshE2;
+ int8_t sharpThreshE3;
+ int8_t sharpThreshE4;
+ int8_t sharpThreshE5;
+ int8_t filter1Coefficients[9];
+ int8_t filter2Coefficients[9];
+ uint8_t cropEnable;
+ uint16_t cropFirstPixel;
+ uint16_t cropLastPixel;
+ uint16_t cropFirstLine;
+ uint16_t cropLastLine;
+};
+
+struct vfe_cmd_asf_update {
+ uint8_t enable;
+ uint8_t smoothFilterEnabled;
+ uint8_t sharpMode;
+ uint8_t smoothCoefCenter;
+ uint8_t smoothCoefSurr;
+ uint8_t normalizeFactor;
+ uint8_t sharpK1;
+ uint8_t sharpK2;
+ uint8_t sharpThreshE1;
+ int8_t sharpThreshE2;
+ int8_t sharpThreshE3;
+ int8_t sharpThreshE4;
+ int8_t sharpThreshE5;
+ int8_t filter1Coefficients[9];
+ int8_t filter2Coefficients[9];
+ uint8_t cropEnable;
+};
+
+enum VFE_TEST_GEN_SYNC_EDGE {
+ VFE_TEST_GEN_SYNC_EDGE_ActiveHigh,
+ VFE_TEST_GEN_SYNC_EDGE_ActiveLow
+};
+
+
+struct vfe_cmd_bus_pm_start {
+ uint8_t output2YWrPmEnable;
+ uint8_t output2CbcrWrPmEnable;
+ uint8_t output1YWrPmEnable;
+ uint8_t output1CbcrWrPmEnable;
+};
+
+struct vfe_frame_skip_counts {
+ uint32_t totalFrameCount;
+ uint32_t output1Count;
+ uint32_t output2Count;
+};
+
+enum VFE_AXI_RD_UNPACK_HBI_SEL {
+ VFE_AXI_RD_HBI_32_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_64_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_128_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_256_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_512_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_1024_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_2048_CLOCK_CYCLES,
+ VFE_AXI_RD_HBI_4096_CLOCK_CYCLES
+};
+
+struct vfe_frame_bpc_info {
+ uint32_t greenDefectPixelCount;
+ uint32_t redBlueDefectPixelCount;
+};
+
+struct vfe_frame_asf_info {
+ uint32_t asfMaxEdge;
+ uint32_t asfHbiCount;
+};
+
+struct vfe_msg_camif_status {
+ uint8_t camifState;
+ uint32_t pixelCount;
+ uint32_t lineCount;
+};
+
+struct vfe40_irq_status {
+ uint32_t vfeIrqStatus0;
+ uint32_t vfeIrqStatus1;
+ uint32_t camifStatus;
+ uint32_t demosaicStatus;
+ uint32_t asfMaxEdge;
+};
+
+#define V40_PREVIEW_AXI_FLAG 0x00000001
+#define V40_SNAPSHOT_AXI_FLAG (0x00000001<<1)
+
+struct vfe40_cmd_type {
+ uint16_t id;
+ uint32_t length;
+ uint32_t offset;
+ uint32_t flag;
+};
+
+struct vfe40_free_buf {
+ struct list_head node;
+ uint32_t paddr;
+ uint32_t y_off;
+ uint32_t cbcr_off;
+};
+
+struct vfe40_output_ch {
+ struct list_head free_buf_queue;
+ spinlock_t free_buf_lock;
+ uint16_t image_mode;
+ int8_t ch0;
+ int8_t ch1;
+ int8_t ch2;
+ uint32_t capture_cnt;
+ uint32_t frame_drop_cnt;
+ struct msm_free_buf ping;
+ struct msm_free_buf pong;
+ struct msm_free_buf free_buf;
+};
+
+/* no error irq in mask 0 */
+#define VFE40_IMASK_ERROR_ONLY_0 0x0
+/* In the normal case we do not want to block the error status. */
+/* bit 0-21 are error irq bits */
+#define VFE40_IMASK_ERROR_ONLY_1 0x005FFFFF
+#define VFE40_IMASK_CAMIF_ERROR (0x00000001<<0)
+#define VFE40_IMASK_BHIST_OVWR (0x00000001<<1)
+#define VFE40_IMASK_STATS_CS_OVWR (0x00000001<<2)
+#define VFE40_IMASK_STATS_IHIST_OVWR (0x00000001<<3)
+#define VFE40_IMASK_REALIGN_BUF_Y_OVFL (0x00000001<<4)
+#define VFE40_IMASK_REALIGN_BUF_CB_OVFL (0x00000001<<5)
+#define VFE40_IMASK_REALIGN_BUF_CR_OVFL (0x00000001<<6)
+#define VFE40_IMASK_VIOLATION (0x00000001<<7)
+#define VFE40_IMASK_IMG_MAST_0_BUS_OVFL (0x00000001<<8)
+#define VFE40_IMASK_IMG_MAST_1_BUS_OVFL (0x00000001<<9)
+#define VFE40_IMASK_IMG_MAST_2_BUS_OVFL (0x00000001<<10)
+#define VFE40_IMASK_IMG_MAST_3_BUS_OVFL (0x00000001<<11)
+#define VFE40_IMASK_IMG_MAST_4_BUS_OVFL (0x00000001<<12)
+#define VFE40_IMASK_IMG_MAST_5_BUS_OVFL (0x00000001<<13)
+#define VFE40_IMASK_IMG_MAST_6_BUS_OVFL (0x00000001<<14)
+#define VFE40_IMASK_STATS_AE_BG_BUS_OVFL (0x00000001<<15)
+#define VFE40_IMASK_STATS_AF_BF_BUS_OVFL (0x00000001<<16)
+#define VFE40_IMASK_STATS_AWB_BUS_OVFL (0x00000001<<17)
+#define VFE40_IMASK_STATS_RS_BUS_OVFL (0x00000001<<18)
+#define VFE40_IMASK_STATS_CS_BUS_OVFL (0x00000001<<19)
+#define VFE40_IMASK_STATS_IHIST_BUS_OVFL (0x00000001<<20)
+#define VFE40_IMASK_STATS_SKIN_BHIST_BUS_OVFL (0x00000001<<21)
+#define VFE40_IMASK_AXI_ERROR (0x00000001<<22)
+
+#define VFE_COM_STATUS 0x000FE000
+
+struct vfe40_output_path {
+ uint16_t output_mode; /* bitmask */
+
+ struct vfe40_output_ch out0; /* preview and thumbnail */
+ struct vfe40_output_ch out1; /* snapshot */
+ struct vfe40_output_ch out2; /* video */
+};
+
+struct vfe40_frame_extra {
+ uint32_t greenDefectPixelCount;
+ uint32_t redBlueDefectPixelCount;
+
+ uint32_t asfMaxEdge;
+ uint32_t asfHbiCount;
+
+ uint32_t yWrPmStats0;
+ uint32_t yWrPmStats1;
+ uint32_t cbcrWrPmStats0;
+ uint32_t cbcrWrPmStats1;
+
+ uint32_t frameCounter;
+};
+
+#define VFE_CLEAR_ALL_IRQS 0xffffffff
+
+#define VFE_HW_VERSION 0x00000000
+#define VFE_GLOBAL_RESET 0x0000000C
+#define VFE_MODULE_RESET 0x00000010
+#define VFE_CGC_OVERRIDE 0x00000014
+#define VFE_MODULE_CFG 0x00000018
+#define VFE_CFG 0x0000001C
+#define VFE_IRQ_CMD 0x00000024
+#define VFE_IRQ_MASK_0 0x00000028
+#define VFE_IRQ_MASK_1 0x0000002C
+#define VFE_IRQ_CLEAR_0 0x00000030
+#define VFE_IRQ_CLEAR_1 0x00000034
+#define VFE_IRQ_STATUS_0 0x00000038
+#define VFE_IRQ_STATUS_1 0x0000003C
+#define VFE_IRQ_COMP_MASK 0x00000040
+#define VFE_BUS_CMD 0x0000004C
+#define VFE_BUS_PING_PONG_STATUS 0x00000180
+#define VFE_AXI_CMD 0x000001D8
+#define VFE_AXI_STATUS 0x000002C0
+#define VFE_BUS_STATS_PING_PONG_BASE 0x000000F4
+
+#define VFE_BUS_STATS_AEC_WR_PING_ADDR 0x000000F4
+#define VFE_BUS_STATS_AEC_WR_PONG_ADDR 0x000000F8
+#define VFE_BUS_STATS_AEC_UB_CFG 0x000000FC
+#define VFE_BUS_STATS_AF_WR_PING_ADDR 0x00000100
+#define VFE_BUS_STATS_AF_WR_PONG_ADDR 0x00000104
+#define VFE_BUS_STATS_AF_UB_CFG 0x00000108
+#define VFE_BUS_STATS_AWB_WR_PING_ADDR 0x0000010C
+#define VFE_BUS_STATS_AWB_WR_PONG_ADDR 0x00000110
+#define VFE_BUS_STATS_AWB_UB_CFG 0x00000114
+#define VFE_BUS_STATS_RS_WR_PING_ADDR 0x00000118
+#define VFE_BUS_STATS_RS_WR_PONG_ADDR 0x0000011C
+#define VFE_BUS_STATS_RS_UB_CFG 0x00000120
+#define VFE_BUS_STATS_CS_WR_PING_ADDR 0x00000124
+#define VFE_BUS_STATS_CS_WR_PONG_ADDR 0x00000128
+#define VFE_BUS_STATS_CS_UB_CFG 0x0000012C
+#define VFE_BUS_STATS_HIST_WR_PING_ADDR 0x00000130
+#define VFE_BUS_STATS_HIST_WR_PONG_ADDR 0x00000134
+#define VFE_BUS_STATS_HIST_UB_CFG 0x00000138
+#define VFE_BUS_STATS_SKIN_WR_PING_ADDR 0x0000013C
+#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR 0x00000140
+#define VFE_BUS_STATS_SKIN_UB_CFG 0x00000144
+
+#define VFE_0_BUS_BDG_QOS_CFG_0 0x000002C4
+#define VFE_0_BUS_BDG_QOS_CFG_1 0x000002C8
+#define VFE_0_BUS_BDG_QOS_CFG_2 0x000002CC
+#define VFE_0_BUS_BDG_QOS_CFG_3 0x000002D0
+#define VFE_0_BUS_BDG_QOS_CFG_4 0x000002D4
+#define VFE_0_BUS_BDG_QOS_CFG_5 0x000002D8
+#define VFE_0_BUS_BDG_QOS_CFG_6 0x000002DC
+#define VFE_0_BUS_BDG_QOS_CFG_7 0x000002E0
+
+#define VFE_CAMIF_COMMAND 0x000002F4
+#define VFE_CAMIF_STATUS 0x0000031C
+#define VFE_REG_UPDATE_CMD 0x00000378
+#define VFE_DEMUX_GAIN_0 0x00000428
+#define VFE_DEMUX_GAIN_1 0x0000042C
+#define VFE_CHROMA_UP 0x0000057C
+
+#define VFE_CLAMP_ENC_MAX 0x00000874
+#define VFE_CLAMP_ENC_MIN 0x00000878
+#define VFE_CLAMP_VIEW_MAX 0x0000087C
+#define VFE_CLAMP_VIEW_MIN 0x00000880
+
+#define VFE_REALIGN_BUF 0x00000884
+#define VFE_STATS_CFG 0x00000888
+#define VFE_STATS_AWB_SGW_CFG 0x000008CC
+#define VFE_DMI_CFG 0x00000910
+#define VFE_DMI_ADDR 0x00000914
+#define VFE_DMI_DATA_LO 0x0000091C
+#define VFE_BUS_IO_FORMAT_CFG 0x00000054
+#define VFE_RDI0_CFG 0x000002E8
+#define VFE_RDI1_CFG 0x000002EC
+#define VFE_RDI2_CFG 0x000002F0
+
+#define VFE_VIOLATION_STATUS 0x000007B4
+
+#define VFE40_DMI_DATA_HI 0x00000918
+#define VFE40_DMI_DATA_LO 0x0000091C
+
+#define VFE40_OUTPUT_MODE_PT BIT(0)
+#define VFE40_OUTPUT_MODE_S BIT(1)
+#define VFE40_OUTPUT_MODE_V BIT(2)
+#define VFE40_OUTPUT_MODE_P BIT(3)
+#define VFE40_OUTPUT_MODE_T BIT(4)
+#define VFE40_OUTPUT_MODE_P_ALL_CHNLS BIT(5)
+#define VFE40_OUTPUT_MODE_PRIMARY BIT(6)
+#define VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS BIT(7)
+#define VFE40_OUTPUT_MODE_SECONDARY BIT(8)
+#define VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS BIT(9)
+
+struct vfe_stats_control {
+ uint32_t droppedStatsFrameCount;
+ uint32_t bufToRender;
+};
+struct axi_ctrl_t;
+struct vfe40_ctrl_type;
+
+struct vfe_share_ctrl_t {
+ void __iomem *vfebase;
+ uint32_t register_total;
+
+ atomic_t vstate;
+ uint32_t vfeFrameId;
+ uint32_t stats_comp;
+ spinlock_t stop_flag_lock;
+ int8_t stop_ack_pending;
+ enum vfe_output_state liveshot_state;
+ uint32_t vfe_capture_count;
+
+ uint16_t operation_mode; /* streaming or snapshot */
+ struct vfe40_output_path outpath;
+
+ uint32_t ref_count;
+ spinlock_t sd_notify_lock;
+ uint32_t vfe_clk_rate;
+
+ atomic_t irq_cnt;
+ struct axi_ctrl_t *axi_ctrl;
+ struct vfe40_ctrl_type *vfe40_ctrl;
+};
+
+struct axi_ctrl_t {
+ struct v4l2_subdev subdev;
+ struct platform_device *pdev;
+ struct resource *vfeirq;
+ spinlock_t tasklet_lock;
+ struct list_head tasklet_q;
+
+ void *syncdata;
+
+ struct resource *vfemem;
+ struct resource *vfeio;
+ struct regulator *fs_vfe;
+ struct clk *vfe_clk[3];
+ struct tasklet_struct vfe40_tasklet;
+ struct vfe_share_ctrl_t *share_ctrl;
+};
+
+struct vfe40_ctrl_type {
+ uint32_t vfeImaskCompositePacked;
+
+ spinlock_t update_ack_lock;
+ spinlock_t state_lock;
+ spinlock_t io_lock;
+ spinlock_t stats_bufq_lock;
+ uint32_t extlen;
+ void *extdata;
+
+ int8_t start_ack_pending;
+ int8_t reset_ack_pending;
+ int8_t update_ack_pending;
+ enum vfe_output_state recording_state;
+ int8_t update_linear;
+ int8_t update_rolloff;
+ int8_t update_la;
+ int8_t update_gamma;
+
+ struct vfe_share_ctrl_t *share_ctrl;
+
+ uint32_t sync_timer_repeat_count;
+ uint32_t sync_timer_state;
+ uint32_t sync_timer_number;
+
+ uint32_t output1Pattern;
+ uint32_t output1Period;
+ uint32_t output2Pattern;
+ uint32_t output2Period;
+ uint32_t vfeFrameSkipCount;
+ uint32_t vfeFrameSkipPeriod;
+ struct vfe_stats_control afStatsControl;
+ struct vfe_stats_control awbStatsControl;
+ struct vfe_stats_control aecStatsControl;
+ struct vfe_stats_control ihistStatsControl;
+ struct vfe_stats_control rsStatsControl;
+ struct vfe_stats_control csStatsControl;
+
+ /* v4l2 subdev */
+ struct v4l2_subdev subdev;
+ struct platform_device *pdev;
+ uint32_t hfr_mode;
+ uint32_t frame_skip_cnt;
+ uint32_t frame_skip_pattern;
+ uint32_t snapshot_frame_cnt;
+ struct msm_stats_bufq_ctrl stats_ctrl;
+ struct msm_stats_ops stats_ops;
+};
+
+#define statsAeNum 0
+#define statsAfNum 1
+#define statsAwbNum 2
+#define statsRsNum 3
+#define statsCsNum 4
+#define statsIhistNum 5
+#define statsSkinNum 6
+
+struct vfe_cmd_stats_ack {
+ uint32_t nextStatsBuf;
+};
+
+#define VFE_STATS_BUFFER_COUNT 3
+
+struct vfe_cmd_stats_buf {
+ uint32_t statsBuf[VFE_STATS_BUFFER_COUNT];
+};
+
+void vfe40_subdev_notify(int id, int path, int image_mode,
+ struct v4l2_subdev *sd, struct vfe_share_ctrl_t *share_ctrl);
+struct vfe40_output_ch *vfe40_get_ch(
+ int path, struct vfe_share_ctrl_t *share_ctrl);
+void vfe40_send_isp_msg(struct v4l2_subdev *sd,
+ uint32_t vfeFrameId, uint32_t isp_msg_id);
+void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl);
+
+static const uint32_t vfe40_AXI_WM_CFG[] = {
+ 0x0000006C,
+ 0x00000090,
+ 0x000000B4,
+ 0x000000D8,
+ 0x000000FC,
+ 0x00000120,
+ 0x00000144,
+};
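+
+/*
+ * Sketch only (the helper name is illustrative, not part of the original
+ * driver): the seven write-master config registers above are spaced
+ * 0x24 bytes apart, so the same offsets can also be computed from the
+ * write-master index.
+ */
+static inline uint32_t vfe40_axi_wm_cfg_offset(unsigned int wm)
+{
+	return 0x0000006C + (wm * 0x24);
+}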
+
+static struct vfe40_cmd_type vfe40_cmd[] = {
+/*0*/
+ {VFE_CMD_DUMMY_0},
+ {VFE_CMD_SET_CLK},
+ {VFE_CMD_RESET},
+ {VFE_CMD_START},
+ {VFE_CMD_TEST_GEN_START},
+/*5*/
+ {VFE_CMD_OPERATION_CFG, V40_OPERATION_CFG_LEN},
+ {VFE_CMD_AXI_OUT_CFG, V40_AXI_OUT_LEN, V40_AXI_OUT_OFF, 0xFF},
+ {VFE_CMD_CAMIF_CFG, V40_CAMIF_LEN, V40_CAMIF_OFF, 0xFF},
+ {VFE_CMD_AXI_INPUT_CFG},
+ {VFE_CMD_BLACK_LEVEL_CFG},
+/*10*/
+ {VFE_CMD_MESH_ROLL_OFF_CFG},
+ {VFE_CMD_DEMUX_CFG, V40_DEMUX_LEN, V40_DEMUX_OFF, 0xFF},
+ {VFE_CMD_FOV_CFG},
+ {VFE_CMD_MAIN_SCALER_CFG},
+ {VFE_CMD_WB_CFG, V40_WB_LEN, V40_WB_OFF, 0xFF},
+/*15*/
+ {VFE_CMD_COLOR_COR_CFG, V40_COLOR_COR_LEN, V40_COLOR_COR_OFF, 0xFF},
+ {VFE_CMD_RGB_G_CFG, V40_RGB_G_LEN, V40_RGB_G_OFF, 0xFF},
+ {VFE_CMD_LA_CFG, V40_LA_LEN, V40_LA_OFF, 0xFF },
+ {VFE_CMD_CHROMA_EN_CFG, V40_CHROMA_EN_LEN, V40_CHROMA_EN_OFF, 0xFF},
+ {VFE_CMD_CHROMA_SUP_CFG, V40_CHROMA_SUP_LEN, V40_CHROMA_SUP_OFF, 0xFF},
+/*20*/
+ {VFE_CMD_MCE_CFG, V40_MCE_LEN, V40_MCE_OFF, 0xFF},
+ {VFE_CMD_SK_ENHAN_CFG, V40_SCE_LEN, V40_SCE_OFF, 0xFF},
+ {VFE_CMD_ASF_CFG, V40_ASF_LEN, V40_ASF_OFF, 0xFF},
+ {VFE_CMD_S2Y_CFG},
+ {VFE_CMD_S2CbCr_CFG},
+/*25*/
+ {VFE_CMD_CHROMA_SUBS_CFG},
+ {VFE_CMD_OUT_CLAMP_CFG, V40_OUT_CLAMP_LEN, V40_OUT_CLAMP_OFF, 0xFF},
+ {VFE_CMD_FRAME_SKIP_CFG},
+ {VFE_CMD_DUMMY_1},
+ {VFE_CMD_DUMMY_2},
+/*30*/
+ {VFE_CMD_DUMMY_3},
+ {VFE_CMD_UPDATE},
+ {VFE_CMD_BL_LVL_UPDATE},
+ {VFE_CMD_DEMUX_UPDATE, V40_DEMUX_LEN, V40_DEMUX_OFF, 0xFF},
+ {VFE_CMD_FOV_UPDATE},
+/*35*/
+ {VFE_CMD_MAIN_SCALER_UPDATE},
+ {VFE_CMD_WB_UPDATE, V40_WB_LEN, V40_WB_OFF, 0xFF},
+ {VFE_CMD_COLOR_COR_UPDATE, V40_COLOR_COR_LEN, V40_COLOR_COR_OFF, 0xFF},
+ {VFE_CMD_RGB_G_UPDATE, V40_RGB_G_LEN, V40_CHROMA_EN_OFF, 0xFF},
+ {VFE_CMD_LA_UPDATE, V40_LA_LEN, V40_LA_OFF, 0xFF },
+/*40*/
+ {VFE_CMD_CHROMA_EN_UPDATE, V40_CHROMA_EN_LEN, V40_CHROMA_EN_OFF, 0xFF},
+ {VFE_CMD_CHROMA_SUP_UPDATE, V40_CHROMA_SUP_LEN,
+ V40_CHROMA_SUP_OFF, 0xFF},
+ {VFE_CMD_MCE_UPDATE, V40_MCE_LEN, V40_MCE_OFF, 0xFF},
+ {VFE_CMD_SK_ENHAN_UPDATE, V40_SCE_LEN, V40_SCE_OFF, 0xFF},
+ {VFE_CMD_S2CbCr_UPDATE},
+/*45*/
+ {VFE_CMD_S2Y_UPDATE},
+ {VFE_CMD_ASF_UPDATE, V40_ASF_UPDATE_LEN, V40_ASF_OFF, 0xFF},
+ {VFE_CMD_FRAME_SKIP_UPDATE},
+ {VFE_CMD_CAMIF_FRAME_UPDATE},
+ {VFE_CMD_STATS_AF_UPDATE},
+/*50*/
+ {VFE_CMD_STATS_AE_UPDATE},
+ {VFE_CMD_STATS_AWB_UPDATE, V40_STATS_AWB_LEN, V40_STATS_AWB_OFF},
+ {VFE_CMD_STATS_RS_UPDATE, V40_STATS_RS_LEN, V40_STATS_RS_OFF},
+ {VFE_CMD_STATS_CS_UPDATE, V40_STATS_CS_LEN, V40_STATS_CS_OFF},
+ {VFE_CMD_STATS_SKIN_UPDATE},
+/*55*/
+ {VFE_CMD_STATS_IHIST_UPDATE, V40_STATS_IHIST_LEN, V40_STATS_IHIST_OFF},
+ {VFE_CMD_DUMMY_4},
+ {VFE_CMD_EPOCH1_ACK},
+ {VFE_CMD_EPOCH2_ACK},
+ {VFE_CMD_START_RECORDING},
+/*60*/
+ {VFE_CMD_STOP_RECORDING},
+ {VFE_CMD_DUMMY_5},
+ {VFE_CMD_DUMMY_6},
+ {VFE_CMD_CAPTURE, V40_CAPTURE_LEN, 0xFF},
+ {VFE_CMD_DUMMY_7},
+/*65*/
+ {VFE_CMD_STOP},
+ {VFE_CMD_GET_HW_VERSION, V40_GET_HW_VERSION_LEN,
+ V40_GET_HW_VERSION_OFF},
+ {VFE_CMD_GET_FRAME_SKIP_COUNTS},
+ {VFE_CMD_OUTPUT1_BUFFER_ENQ},
+ {VFE_CMD_OUTPUT2_BUFFER_ENQ},
+/*70*/
+ {VFE_CMD_OUTPUT3_BUFFER_ENQ},
+ {VFE_CMD_JPEG_OUT_BUF_ENQ},
+ {VFE_CMD_RAW_OUT_BUF_ENQ},
+ {VFE_CMD_RAW_IN_BUF_ENQ},
+ {VFE_CMD_STATS_AF_ENQ},
+/*75*/
+ {VFE_CMD_STATS_AE_ENQ},
+ {VFE_CMD_STATS_AWB_ENQ},
+ {VFE_CMD_STATS_RS_ENQ},
+ {VFE_CMD_STATS_CS_ENQ},
+ {VFE_CMD_STATS_SKIN_ENQ},
+/*80*/
+ {VFE_CMD_STATS_IHIST_ENQ},
+ {VFE_CMD_DUMMY_8},
+ {VFE_CMD_JPEG_ENC_CFG},
+ {VFE_CMD_DUMMY_9},
+ {VFE_CMD_STATS_AF_START},
+/*85*/
+ {VFE_CMD_STATS_AF_STOP},
+ {VFE_CMD_STATS_AE_START},
+ {VFE_CMD_STATS_AE_STOP},
+ {VFE_CMD_STATS_AWB_START, V40_STATS_AWB_LEN, V40_STATS_AWB_OFF},
+ {VFE_CMD_STATS_AWB_STOP},
+/*90*/
+ {VFE_CMD_STATS_RS_START, V40_STATS_RS_LEN, V40_STATS_RS_OFF},
+ {VFE_CMD_STATS_RS_STOP},
+ {VFE_CMD_STATS_CS_START, V40_STATS_CS_LEN, V40_STATS_CS_OFF},
+ {VFE_CMD_STATS_CS_STOP},
+ {VFE_CMD_STATS_SKIN_START},
+/*95*/
+ {VFE_CMD_STATS_SKIN_STOP},
+ {VFE_CMD_STATS_IHIST_START, V40_STATS_IHIST_LEN, V40_STATS_IHIST_OFF},
+ {VFE_CMD_STATS_IHIST_STOP},
+ {VFE_CMD_DUMMY_10},
+ {VFE_CMD_SYNC_TIMER_SETTING, V40_SYNC_TIMER_LEN, V40_SYNC_TIMER_OFF},
+/*100*/
+ {VFE_CMD_ASYNC_TIMER_SETTING, V40_ASYNC_TIMER_LEN, V40_ASYNC_TIMER_OFF},
+ {VFE_CMD_LIVESHOT},
+ {VFE_CMD_LA_SETUP},
+ {VFE_CMD_LINEARIZATION_CFG, V40_LINEARIZATION_LEN1,
+ V40_LINEARIZATION_OFF1},
+ {VFE_CMD_DEMOSAICV3},
+/*105*/
+ {VFE_CMD_DEMOSAICV3_ABCC_CFG},
+ {VFE_CMD_DEMOSAICV3_DBCC_CFG, V40_DEMOSAICV3_DBCC_LEN,
+ V40_DEMOSAICV3_DBCC_OFF},
+ {VFE_CMD_DEMOSAICV3_DBPC_CFG},
+ {VFE_CMD_DEMOSAICV3_ABF_CFG, V40_DEMOSAICV3_ABF_LEN,
+ V40_DEMOSAICV3_ABF_OFF},
+ {VFE_CMD_DEMOSAICV3_ABCC_UPDATE},
+/*110*/
+ {VFE_CMD_DEMOSAICV3_DBCC_UPDATE, V40_DEMOSAICV3_DBCC_LEN,
+ V40_DEMOSAICV3_DBCC_OFF},
+ {VFE_CMD_DEMOSAICV3_DBPC_UPDATE},
+ {VFE_CMD_XBAR_CFG},
+ {VFE_CMD_MODULE_CFG, V40_MODULE_CFG_LEN, V40_MODULE_CFG_OFF},
+ {VFE_CMD_ZSL},
+/*115*/
+ {VFE_CMD_LINEARIZATION_UPDATE, V40_LINEARIZATION_LEN1,
+ V40_LINEARIZATION_OFF1},
+ {VFE_CMD_DEMOSAICV3_ABF_UPDATE, V40_DEMOSAICV3_ABF_LEN,
+ V40_DEMOSAICV3_ABF_OFF},
+ {VFE_CMD_CLF_CFG, V40_CLF_CFG_LEN, V40_CLF_CFG_OFF},
+ {VFE_CMD_CLF_LUMA_UPDATE, V40_CLF_LUMA_UPDATE_LEN,
+ V40_CLF_LUMA_UPDATE_OFF},
+ {VFE_CMD_CLF_CHROMA_UPDATE, V40_CLF_CHROMA_UPDATE_LEN,
+ V40_CLF_CHROMA_UPDATE_OFF},
+/*120*/
+ {VFE_CMD_PCA_ROLL_OFF_CFG},
+ {VFE_CMD_PCA_ROLL_OFF_UPDATE},
+ {VFE_CMD_GET_REG_DUMP},
+ {VFE_CMD_GET_LINEARIZATON_TABLE},
+ {VFE_CMD_GET_MESH_ROLLOFF_TABLE},
+/*125*/
+ {VFE_CMD_GET_PCA_ROLLOFF_TABLE},
+ {VFE_CMD_GET_RGB_G_TABLE},
+ {VFE_CMD_GET_LA_TABLE},
+ {VFE_CMD_DEMOSAICV3_UPDATE},
+ {VFE_CMD_ACTIVE_REGION_CFG},
+/*130*/
+ {VFE_CMD_COLOR_PROCESSING_CONFIG},
+ {VFE_CMD_STATS_WB_AEC_CONFIG},
+ {VFE_CMD_STATS_WB_AEC_UPDATE},
+ {VFE_CMD_Y_GAMMA_CONFIG},
+ {VFE_CMD_SCALE_OUTPUT1_CONFIG},
+/*135*/
+ {VFE_CMD_SCALE_OUTPUT2_CONFIG},
+ {VFE_CMD_CAPTURE_RAW},
+ {VFE_CMD_STOP_LIVESHOT},
+ {VFE_CMD_RECONFIG_VFE},
+ {VFE_CMD_STATS_REQBUF},
+/*140*/
+ {VFE_CMD_STATS_ENQUEUEBUF},
+ {VFE_CMD_STATS_FLUSH_BUFQ},
+ {VFE_CMD_FOV_ENC_CFG, V40_FOV_ENC_LEN, V40_FOV_ENC_OFF, 0xFF},
+ {VFE_CMD_FOV_VIEW_CFG, V40_FOV_VIEW_LEN, V40_FOV_VIEW_OFF, 0xFF},
+ {VFE_CMD_FOV_ENC_UPDATE, V40_FOV_ENC_LEN, V40_FOV_ENC_OFF, 0xFF},
+/*145*/
+ {VFE_CMD_FOV_VIEW_UPDATE, V40_FOV_VIEW_LEN, V40_FOV_VIEW_OFF, 0xFF},
+ {VFE_CMD_SCALER_ENC_CFG, V40_SCALER_ENC_LEN, V40_SCALER_ENC_OFF, 0xFF},
+ {VFE_CMD_SCALER_VIEW_CFG, V40_SCALER_VIEW_LEN,
+ V40_SCALER_VIEW_OFF, 0xFF},
+ {VFE_CMD_SCALER_ENC_UPDATE, V40_SCALER_ENC_LEN,
+ V40_SCALER_ENC_OFF, 0xFF},
+ {VFE_CMD_SCALER_VIEW_UPDATE, V40_SCALER_VIEW_LEN,
+ V40_SCALER_VIEW_OFF, 0xFF},
+/*150*/
+ {VFE_CMD_COLORXFORM_ENC_CFG, V40_COLORXFORM_ENC_CFG_LEN,
+ V40_COLORXFORM_ENC_CFG_OFF, 0xFF},
+ {VFE_CMD_COLORXFORM_VIEW_CFG, V40_COLORXFORM_VIEW_CFG_LEN,
+ V40_COLORXFORM_VIEW_CFG_OFF},
+ {VFE_CMD_COLORXFORM_ENC_UPDATE, V40_COLORXFORM_ENC_CFG_LEN,
+ V40_COLORXFORM_ENC_CFG_OFF, 0xFF},
+ {VFE_CMD_COLORXFORM_VIEW_UPDATE, V40_COLORXFORM_VIEW_CFG_LEN,
+ V40_COLORXFORM_VIEW_CFG_OFF, 0xFF},
+};
+
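+/*
+ * Sketch only (the helper name is illustrative, not part of the original
+ * driver): each vfe40_cmd[] entry maps a VFE_CMD_* id to the register
+ * offset at which that command's config payload of 'length' bytes is
+ * written.
+ */
+static inline void __iomem *vfe40_cmd_reg(void __iomem *vfebase,
+	uint16_t cmd_id)
+{
+	return vfebase + vfe40_cmd[cmd_id].offset;
+}
+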
+#endif /* __MSM_VFE40_H__ */
diff --git a/drivers/media/video/msm/vfe/msm_vfe40_axi.c b/drivers/media/video/msm/vfe/msm_vfe40_axi.c
new file mode 100644
index 0000000..35d5207
--- /dev/null
+++ b/drivers/media/video/msm/vfe/msm_vfe40_axi.c
@@ -0,0 +1,812 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <mach/irqs.h>
+#include <mach/camera.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_vfe40.h"
+
+static int msm_axi_subdev_s_crystal_freq(struct v4l2_subdev *sd,
+ u32 freq, u32 flags)
+{
+ int rc = 0;
+ int round_rate;
+ struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+
+	round_rate = clk_round_rate(axi_ctrl->vfe_clk[0], freq);
+	if (round_rate < 0) {
+		pr_err("%s: clk_round_rate failed %d\n",
+			__func__, round_rate);
+		return round_rate;
+	}
+
+ axi_ctrl->share_ctrl->vfe_clk_rate = round_rate;
+ rc = clk_set_rate(axi_ctrl->vfe_clk[0], round_rate);
+ if (rc < 0)
+ pr_err("%s: clk_set_rate failed %d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+void axi_start(struct axi_ctrl_t *axi_ctrl)
+{
+ switch (axi_ctrl->share_ctrl->operation_mode) {
+ case VFE_OUTPUTS_PREVIEW:
+ case VFE_OUTPUTS_PREVIEW_AND_VIDEO:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch1]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out0.ch2]);
+ }
+ break;
+ default:
+ if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ } else if (axi_ctrl->share_ctrl->outpath.output_mode &
+ VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS) {
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch0]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch1]);
+ msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+ vfe40_AXI_WM_CFG[axi_ctrl->
+ share_ctrl->outpath.out1.ch2]);
+ }
+ break;
+ }
+}
+
+void axi_stop(struct axi_ctrl_t *axi_ctrl)
+{
+ uint8_t axiBusyFlag = true;
+ /* axi halt command. */
+ msm_camera_io_w(AXI_HALT,
+ axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+ wmb();
+ while (axiBusyFlag) {
+ if (msm_camera_io_r(
+ axi_ctrl->share_ctrl->vfebase + VFE_AXI_STATUS) & 0x1)
+ axiBusyFlag = false;
+ }
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(AXI_HALT_CLEAR,
+ axi_ctrl->share_ctrl->vfebase + VFE_AXI_CMD);
+
+	/* after the axi halt, it is safe to apply the global reset. */
+	/* enable reset_ack and async timer interrupt only while
+	stopping the pipeline. */
+ msm_camera_io_w(0xf0000000,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
+ msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
+ axi_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+
+ /* Ensure the write order while writing
+ to the command register using the barrier */
+ msm_camera_io_w_mb(VFE_RESET_UPON_STOP_CMD,
+ axi_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+}
+
+static int vfe40_config_axi(
+ struct axi_ctrl_t *axi_ctrl, int mode, uint32_t *ao)
+{
+ uint32_t *ch_info;
+ uint32_t *axi_cfg = ao;
+
+	/* Update the corresponding write masters for each output */
+ ch_info = axi_cfg + V40_AXI_CFG_LEN;
+ axi_ctrl->share_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
+ axi_ctrl->share_ctrl->outpath.out0.ch1 =
+ 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out0.ch2 = 0x0000FFFF & *ch_info;
+ axi_ctrl->share_ctrl->outpath.out0.image_mode =
+ 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out1.ch0 = 0x0000FFFF & *ch_info;
+ axi_ctrl->share_ctrl->outpath.out1.ch1 =
+ 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out1.ch2 = 0x0000FFFF & *ch_info;
+ axi_ctrl->share_ctrl->outpath.out1.image_mode =
+ 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out2.ch0 = 0x0000FFFF & *ch_info;
+ axi_ctrl->share_ctrl->outpath.out2.ch1 =
+ 0x0000FFFF & (*ch_info++ >> 16);
+ axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+
+ switch (mode) {
+ case OUTPUT_PRIM:
+ axi_ctrl->share_ctrl->outpath.output_mode =
+ VFE40_OUTPUT_MODE_PRIMARY;
+ break;
+ case OUTPUT_PRIM_ALL_CHNLS:
+ axi_ctrl->share_ctrl->outpath.output_mode =
+ VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
+ break;
+ case OUTPUT_PRIM|OUTPUT_SEC:
+ axi_ctrl->share_ctrl->outpath.output_mode =
+ VFE40_OUTPUT_MODE_PRIMARY;
+ axi_ctrl->share_ctrl->outpath.output_mode |=
+ VFE40_OUTPUT_MODE_SECONDARY;
+ break;
+ case OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS:
+ axi_ctrl->share_ctrl->outpath.output_mode =
+ VFE40_OUTPUT_MODE_PRIMARY;
+ axi_ctrl->share_ctrl->outpath.output_mode |=
+ VFE40_OUTPUT_MODE_SECONDARY_ALL_CHNLS;
+ break;
+ case OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC:
+ axi_ctrl->share_ctrl->outpath.output_mode =
+ VFE40_OUTPUT_MODE_PRIMARY_ALL_CHNLS;
+ axi_ctrl->share_ctrl->outpath.output_mode |=
+ VFE40_OUTPUT_MODE_SECONDARY;
+ break;
+ default:
+ pr_err("%s Invalid AXI mode %d ", __func__, mode);
+ return -EINVAL;
+ }
+ msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
+ VFE_BUS_IO_FORMAT_CFG);
+ msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length - V40_AXI_CH_INF_LEN);
+ return 0;
+}
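+
+/*
+ * Sketch only (the helper name is illustrative, not part of the original
+ * driver): the channel-info words parsed above pack two 16-bit fields per
+ * 32-bit entry, low halfword first -- e.g. out0.ch0/ch1 share one word and
+ * out0.ch2/image_mode share the next.
+ */
+static inline uint32_t vfe40_pack_ch_info(uint16_t lo, uint16_t hi)
+{
+	return ((uint32_t)hi << 16) | lo;
+}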
+
+static int msm_axi_config(struct v4l2_subdev *sd, void __user *arg)
+{
+ struct msm_vfe_cfg_cmd cfgcmd;
+ struct msm_isp_cmd vfecmd;
+ int rc = 0;
+ struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+
+ if (!axi_ctrl->share_ctrl->vfebase) {
+ pr_err("%s: base address unmapped\n", __func__);
+ return -EFAULT;
+ }
+ if (NULL != arg) {
+ if (copy_from_user(&cfgcmd, arg, sizeof(cfgcmd))) {
+ ERR_COPY_FROM_USER();
+ return -EFAULT;
+ }
+ }
+ if (NULL != cfgcmd.value) {
+ if (copy_from_user(&vfecmd,
+ (void __user *)(cfgcmd.value),
+ sizeof(vfecmd))) {
+ pr_err("%s %d: copy_from_user failed\n", __func__,
+ __LINE__);
+ return -EFAULT;
+ }
+ }
+
+ switch (cfgcmd.cmd_type) {
+ case CMD_AXI_CFG_PRIM: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe40_config_axi(axi_ctrl, OUTPUT_PRIM, axio);
+ kfree(axio);
+ }
+ break;
+ case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe40_config_axi(axi_ctrl, OUTPUT_PRIM_ALL_CHNLS, axio);
+ kfree(axio);
+ }
+ break;
+ case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe40_config_axi(axi_ctrl, OUTPUT_PRIM|OUTPUT_SEC, axio);
+ kfree(axio);
+ }
+ break;
+ case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe40_config_axi(axi_ctrl,
+ OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
+ kfree(axio);
+ }
+ break;
+ case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
+ uint32_t *axio = NULL;
+ axio = kmalloc(vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length,
+ GFP_ATOMIC);
+ if (!axio) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(axio, (void __user *)(vfecmd.value),
+ vfe40_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+ kfree(axio);
+ rc = -EFAULT;
+ break;
+ }
+ vfe40_config_axi(axi_ctrl,
+ OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
+ kfree(axio);
+ }
+ break;
+ case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS:
+ pr_err("%s Invalid/Unsupported AXI configuration %x",
+ __func__, cfgcmd.cmd_type);
+ break;
+ case CMD_AXI_START:
+ axi_start(axi_ctrl);
+ break;
+ case CMD_AXI_STOP:
+ axi_stop(axi_ctrl);
+ break;
+ default:
+ pr_err("%s Unsupported AXI configuration %x ", __func__,
+ cfgcmd.cmd_type);
+ break;
+ }
+ return rc;
+}
+
+static struct msm_free_buf *vfe40_check_free_buffer(
+ int id, int path, struct axi_ctrl_t *axi_ctrl)
+{
+ struct vfe40_output_ch *outch = NULL;
+ struct msm_free_buf *b = NULL;
+ uint32_t image_mode = 0;
+
+ if (path == VFE_MSG_OUTPUT_PRIMARY)
+ image_mode = axi_ctrl->share_ctrl->outpath.out0.image_mode;
+ else
+ image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
+
+ vfe40_subdev_notify(id, path, image_mode,
+ &axi_ctrl->subdev, axi_ctrl->share_ctrl);
+ outch = vfe40_get_ch(path, axi_ctrl->share_ctrl);
+ if (outch->free_buf.ch_paddr[0])
+ b = &outch->free_buf;
+ return b;
+}
+
+static void vfe_send_outmsg(
+ struct axi_ctrl_t *axi_ctrl, uint8_t msgid,
+ uint32_t ch0_paddr, uint32_t ch1_paddr,
+ uint32_t ch2_paddr, uint32_t image_mode)
+{
+ struct isp_msg_output msg;
+
+ msg.output_id = msgid;
+ msg.buf.image_mode = image_mode;
+ msg.buf.ch_paddr[0] = ch0_paddr;
+ msg.buf.ch_paddr[1] = ch1_paddr;
+ msg.buf.ch_paddr[2] = ch2_paddr;
+ msg.frameCounter = axi_ctrl->share_ctrl->vfeFrameId;
+
+ v4l2_subdev_notify(&axi_ctrl->subdev,
+ NOTIFY_VFE_MSG_OUT,
+ &msg);
+ return;
+}
+
+static void vfe40_process_output_path_irq_0(
+ struct axi_ctrl_t *axi_ctrl)
+{
+ uint32_t ping_pong;
+ uint32_t ch0_paddr, ch1_paddr, ch2_paddr;
+ uint8_t out_bool = 0;
+ struct msm_free_buf *free_buf = NULL;
+
+ free_buf = vfe40_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+ VFE_MSG_OUTPUT_PRIMARY, axi_ctrl);
+
+	/* we render frames in the following conditions:
+	1. Continuous mode and the free buffer is available.
+	2. In snapshot mode, a free buffer is not always available;
+	when the pending snapshot count is <= 1, there is no need to use
+	a free buffer.
+	*/
+ out_bool = (
+ (axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_RAW ||
+ axi_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_STARTED ||
+ axi_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_STOP_REQUESTED ||
+ axi_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_STOPPED) &&
+ (axi_ctrl->share_ctrl->vfe_capture_count <= 1)) ||
+ free_buf;
+
+ if (out_bool) {
+ ping_pong = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+ VFE_BUS_PING_PONG_STATUS);
+
+ /* Channel 0*/
+ ch0_paddr = vfe40_get_ch_addr(
+ ping_pong, axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out0.ch0);
+ /* Channel 1*/
+ ch1_paddr = vfe40_get_ch_addr(
+ ping_pong, axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out0.ch1);
+ /* Channel 2*/
+ ch2_paddr = vfe40_get_ch_addr(
+ ping_pong, axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out0.ch2);
+
+ CDBG("output path 0, ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+ ch0_paddr, ch1_paddr, ch2_paddr);
+ if (free_buf) {
+ /* Y channel */
+ vfe40_put_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out0.ch0,
+ free_buf->ch_paddr[0]);
+ /* Chroma channel */
+ vfe40_put_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out0.ch1,
+ free_buf->ch_paddr[1]);
+ if (free_buf->num_planes > 2)
+ vfe40_put_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out0.ch2,
+ free_buf->ch_paddr[2]);
+ }
+ if (axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_RAW ||
+ axi_ctrl->share_ctrl->liveshot_state ==
+ VFE_STATE_STOPPED)
+ axi_ctrl->share_ctrl->outpath.out0.capture_cnt--;
+
+ vfe_send_outmsg(axi_ctrl,
+ MSG_ID_OUTPUT_PRIMARY, ch0_paddr,
+ ch1_paddr, ch2_paddr,
+ axi_ctrl->share_ctrl->outpath.out0.image_mode);
+
+ if (axi_ctrl->share_ctrl->liveshot_state == VFE_STATE_STOPPED)
+ axi_ctrl->share_ctrl->liveshot_state = VFE_STATE_IDLE;
+
+ } else {
+ axi_ctrl->share_ctrl->outpath.out0.frame_drop_cnt++;
+ CDBG("path_irq_0 - no free buffer!\n");
+ }
+}
+
+static void vfe40_process_output_path_irq_1(
+ struct axi_ctrl_t *axi_ctrl)
+{
+ uint32_t ping_pong;
+ uint32_t ch0_paddr, ch1_paddr, ch2_paddr;
+ /* this must be snapshot main image output. */
+ uint8_t out_bool = 0;
+ struct msm_free_buf *free_buf = NULL;
+
+ free_buf = vfe40_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+ VFE_MSG_OUTPUT_SECONDARY, axi_ctrl);
+ out_bool = ((axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_RAW ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB) &&
+ (axi_ctrl->share_ctrl->vfe_capture_count <= 1)) ||
+ free_buf;
+
+ if (out_bool) {
+ ping_pong = msm_camera_io_r(axi_ctrl->share_ctrl->vfebase +
+ VFE_BUS_PING_PONG_STATUS);
+
+ /* Y channel */
+ ch0_paddr = vfe40_get_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out1.ch0);
+ /* Chroma channel */
+ ch1_paddr = vfe40_get_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out1.ch1);
+ ch2_paddr = vfe40_get_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out1.ch2);
+
+ CDBG("%s ch0 = 0x%x, ch1 = 0x%x, ch2 = 0x%x\n",
+ __func__, ch0_paddr, ch1_paddr, ch2_paddr);
+ if (free_buf) {
+ /* Y channel */
+ vfe40_put_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out1.ch0,
+ free_buf->ch_paddr[0]);
+ /* Chroma channel */
+ vfe40_put_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out1.ch1,
+ free_buf->ch_paddr[1]);
+ if (free_buf->num_planes > 2)
+ vfe40_put_ch_addr(ping_pong,
+ axi_ctrl->share_ctrl->vfebase,
+ axi_ctrl->share_ctrl->outpath.out1.ch2,
+ free_buf->ch_paddr[2]);
+ }
+ if (axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_RAW ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB)
+ axi_ctrl->share_ctrl->outpath.out1.capture_cnt--;
+
+ vfe_send_outmsg(axi_ctrl,
+ MSG_ID_OUTPUT_SECONDARY, ch0_paddr,
+ ch1_paddr, ch2_paddr,
+ axi_ctrl->share_ctrl->outpath.out1.image_mode);
+
+ } else {
+ axi_ctrl->share_ctrl->outpath.out1.frame_drop_cnt++;
+ CDBG("path_irq_1 - no free buffer!\n");
+ }
+}
+
+static void msm_axi_process_irq(struct v4l2_subdev *sd, void *arg)
+{
+ struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+ uint32_t irqstatus = (uint32_t) arg;
+
+ if (!axi_ctrl->share_ctrl->vfebase) {
+ pr_err("%s: base address unmapped\n", __func__);
+ return;
+ }
+ /* next, check output path related interrupts. */
+ if (irqstatus &
+ VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE0_MASK) {
+		CDBG("Image composite done 0 irq occurred.\n");
+ vfe40_process_output_path_irq_0(axi_ctrl);
+ }
+ if (irqstatus &
+ VFE_IRQ_STATUS0_IMAGE_COMPOSIT_DONE1_MASK) {
+		CDBG("Image composite done 1 irq occurred.\n");
+ vfe40_process_output_path_irq_1(axi_ctrl);
+ }
+	/* in snapshot mode, once both capture counts reach zero,
+	send the snapshot done message */
+ if (axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_MAIN ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_MAIN_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_THUMB_AND_JPEG ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_JPEG_AND_THUMB ||
+ axi_ctrl->share_ctrl->operation_mode ==
+ VFE_OUTPUTS_RAW) {
+ if ((axi_ctrl->share_ctrl->outpath.out0.capture_cnt == 0)
+ && (axi_ctrl->share_ctrl->outpath.out1.
+ capture_cnt == 0)) {
+ msm_camera_io_w_mb(
+ CAMIF_COMMAND_STOP_IMMEDIATELY,
+ axi_ctrl->share_ctrl->vfebase +
+ VFE_CAMIF_COMMAND);
+ vfe40_send_isp_msg(&axi_ctrl->subdev,
+ axi_ctrl->share_ctrl->vfeFrameId,
+ MSG_ID_SNAPSHOT_DONE);
+ }
+ }
+}
+
+static int msm_axi_buf_cfg(struct v4l2_subdev *sd, void __user *arg)
+{
+ struct msm_camvfe_params *vfe_params =
+ (struct msm_camvfe_params *)arg;
+ struct msm_vfe_cfg_cmd *cmd = vfe_params->vfe_cfg;
+ struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+ void *data = vfe_params->data;
+ int rc = 0;
+
+ if (!axi_ctrl->share_ctrl->vfebase) {
+ pr_err("%s: base address unmapped\n", __func__);
+ return -EFAULT;
+ }
+
+ switch (cmd->cmd_type) {
+ case CMD_CONFIG_PING_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe40_output_ch *outch =
+ vfe40_get_ch(path, axi_ctrl->share_ctrl);
+ outch->ping = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_PONG_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe40_output_ch *outch =
+ vfe40_get_ch(path, axi_ctrl->share_ctrl);
+ outch->pong = *((struct msm_free_buf *)data);
+ }
+ break;
+
+ case CMD_CONFIG_FREE_BUF_ADDR: {
+ int path = *((int *)cmd->value);
+ struct vfe40_output_ch *outch =
+ vfe40_get_ch(path, axi_ctrl->share_ctrl);
+ outch->free_buf = *((struct msm_free_buf *)data);
+ }
+ break;
+ default:
+ pr_err("%s Unsupported AXI Buf config %x ", __func__,
+ cmd->cmd_type);
+ }
+ return rc;
+}
+
+static struct msm_cam_clk_info vfe40_clk_info[] = {
+ {"vfe_clk_src", 266670000},
+ {"camss_vfe_vfe_clk", -1},
+ {"camss_csi_vfe_clk", -1},
+ {"top_clk", -1},
+ {"iface_clk", -1},
+ {"bus_clk", -1},
+};
+
+int msm_axi_subdev_init(struct v4l2_subdev *sd,
+ struct msm_cam_media_controller *mctl)
+{
+ int rc = 0;
+ struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+ v4l2_set_subdev_hostdata(sd, mctl);
+ spin_lock_init(&axi_ctrl->tasklet_lock);
+ INIT_LIST_HEAD(&axi_ctrl->tasklet_q);
+ spin_lock_init(&axi_ctrl->share_ctrl->sd_notify_lock);
+
+ axi_ctrl->share_ctrl->vfebase = ioremap(axi_ctrl->vfemem->start,
+ resource_size(axi_ctrl->vfemem));
+ if (!axi_ctrl->share_ctrl->vfebase) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto remap_failed;
+ }
+
+ if (axi_ctrl->fs_vfe == NULL) {
+ axi_ctrl->fs_vfe =
+ regulator_get(&axi_ctrl->pdev->dev, "vdd");
+ if (IS_ERR(axi_ctrl->fs_vfe)) {
+ pr_err("%s: Regulator FS_VFE get failed %ld\n",
+ __func__, PTR_ERR(axi_ctrl->fs_vfe));
+ axi_ctrl->fs_vfe = NULL;
+ goto fs_failed;
+ } else if (regulator_enable(axi_ctrl->fs_vfe)) {
+ pr_err("%s: Regulator FS_VFE enable failed\n",
+ __func__);
+ regulator_put(axi_ctrl->fs_vfe);
+ axi_ctrl->fs_vfe = NULL;
+ goto fs_failed;
+ }
+ }
+ rc = msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe40_clk_info,
+ axi_ctrl->vfe_clk, ARRAY_SIZE(vfe40_clk_info), 1);
+ if (rc < 0)
+ goto clk_enable_failed;
+
+ msm_camio_bus_scale_cfg(
+ mctl->sdata->pdata->cam_bus_scale_table, S_INIT);
+ msm_camio_bus_scale_cfg(
+ mctl->sdata->pdata->cam_bus_scale_table, S_PREVIEW);
+
+ axi_ctrl->share_ctrl->register_total = VFE40_REGISTER_TOTAL;
+
+ enable_irq(axi_ctrl->vfeirq->start);
+
+ return rc;
+clk_enable_failed:
+ regulator_disable(axi_ctrl->fs_vfe);
+ regulator_put(axi_ctrl->fs_vfe);
+ axi_ctrl->fs_vfe = NULL;
+fs_failed:
+ iounmap(axi_ctrl->share_ctrl->vfebase);
+ axi_ctrl->share_ctrl->vfebase = NULL;
+remap_failed:
+ disable_irq(axi_ctrl->vfeirq->start);
+ return rc;
+}
+
+void msm_axi_subdev_release(struct v4l2_subdev *sd)
+{
+ struct msm_cam_media_controller *pmctl =
+ (struct msm_cam_media_controller *)v4l2_get_subdev_hostdata(sd);
+ struct axi_ctrl_t *axi_ctrl = v4l2_get_subdevdata(sd);
+ if (!axi_ctrl->share_ctrl->vfebase) {
+ pr_err("%s: base address unmapped\n", __func__);
+ return;
+ }
+
+ CDBG("%s, free_irq\n", __func__);
+ disable_irq(axi_ctrl->vfeirq->start);
+ tasklet_kill(&axi_ctrl->vfe40_tasklet);
+ msm_cam_clk_enable(&axi_ctrl->pdev->dev, vfe40_clk_info,
+ axi_ctrl->vfe_clk, ARRAY_SIZE(vfe40_clk_info), 0);
+
+ if (axi_ctrl->fs_vfe) {
+ regulator_disable(axi_ctrl->fs_vfe);
+ regulator_put(axi_ctrl->fs_vfe);
+ axi_ctrl->fs_vfe = NULL;
+ }
+ iounmap(axi_ctrl->share_ctrl->vfebase);
+ axi_ctrl->share_ctrl->vfebase = NULL;
+
+ if (atomic_read(&axi_ctrl->share_ctrl->irq_cnt))
+ pr_warning("%s, Warning IRQ Count not ZERO\n", __func__);
+
+ msm_camio_bus_scale_cfg(
+ pmctl->sdata->pdata->cam_bus_scale_table, S_EXIT);
+}
+
+static long msm_axi_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ switch (cmd) {
+ case VIDIOC_MSM_AXI_INIT:
+ rc = msm_axi_subdev_init(sd,
+ (struct msm_cam_media_controller *)arg);
+ break;
+ case VIDIOC_MSM_AXI_CFG:
+ rc = msm_axi_config(sd, arg);
+ break;
+ case VIDIOC_MSM_AXI_IRQ:
+ msm_axi_process_irq(sd, arg);
+ rc = 0;
+ break;
+ case VIDIOC_MSM_AXI_BUF_CFG:
+ msm_axi_buf_cfg(sd, arg);
+ rc = 0;
+ break;
+ case VIDIOC_MSM_AXI_RELEASE:
+ msm_axi_subdev_release(sd);
+ rc = 0;
+ break;
+ default:
+ pr_err("%s: command not found\n", __func__);
+ }
+ return rc;
+}
+
+static const struct v4l2_subdev_core_ops msm_axi_subdev_core_ops = {
+ .ioctl = msm_axi_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops msm_axi_subdev_video_ops = {
+ .s_crystal_freq = msm_axi_subdev_s_crystal_freq,
+};
+
+static const struct v4l2_subdev_ops msm_axi_subdev_ops = {
+ .core = &msm_axi_subdev_core_ops,
+ .video = &msm_axi_subdev_video_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_axi_internal_ops;
+
+void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl)
+{
+ struct msm_cam_subdev_info sd_info;
+ v4l2_subdev_init(&axi_ctrl->subdev, &msm_axi_subdev_ops);
+ axi_ctrl->subdev.internal_ops = &msm_axi_internal_ops;
+ axi_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(axi_ctrl->subdev.name,
+ sizeof(axi_ctrl->subdev.name), "axi");
+ v4l2_set_subdevdata(&axi_ctrl->subdev, axi_ctrl);
+
+ sd_info.sdev_type = AXI_DEV;
+ sd_info.sd_index = axi_ctrl->pdev->id;
+ sd_info.irq_num = 0;
+ msm_cam_register_subdev_node(&axi_ctrl->subdev, &sd_info);
+}
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index d3edfa8..b7c73de 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -18,13 +18,14 @@
#include <linux/io.h>
#include <linux/stat.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/usb/msm_hsusb.h>
#include <mach/usb_bam.h>
#include <mach/sps.h>
#include <linux/workqueue.h>
#define USB_SUMMING_THRESHOLD 512
-#define CONNECTIONS_NUM 4
+#define CONNECTIONS_NUM 4
static struct sps_bam_props usb_props;
static struct sps_pipe *sps_pipes[CONNECTIONS_NUM][2];
@@ -43,32 +44,35 @@
struct usb_bam_connect_info {
u8 idx;
- u8 *src_pipe;
- u8 *dst_pipe;
+ u32 *src_pipe;
+ u32 *dst_pipe;
struct usb_bam_wake_event_info peer_event;
bool enabled;
};
static struct usb_bam_connect_info usb_bam_connections[CONNECTIONS_NUM];
+static struct usb_bam_pipe_connect ***msm_usb_bam_connections_info;
+static struct usb_bam_pipe_connect *bam_connection_arr;
+
+static bool device_tree_enabled;
static inline int bam_offset(struct msm_usb_bam_platform_data *pdata)
{
return pdata->usb_active_bam * CONNECTIONS_NUM * 2;
}
-static int connect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
- u8 *usb_pipe_idx)
+static int connect_pipe(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+ u32 *usb_pipe_idx)
{
int ret;
- struct sps_pipe **pipe = &sps_pipes[connection_idx][pipe_dir];
+ struct sps_pipe **pipe = &sps_pipes[conn_idx][pipe_dir];
struct sps_connect *connection =
- &sps_connections[connection_idx][pipe_dir];
+ &sps_connections[conn_idx][pipe_dir];
struct msm_usb_bam_platform_data *pdata =
- (struct msm_usb_bam_platform_data *)
- (usb_bam_pdev->dev.platform_data);
+ usb_bam_pdev->dev.platform_data;
struct usb_bam_pipe_connect *pipe_connection =
- (struct usb_bam_pipe_connect *)(pdata->connections +
- bam_offset(pdata) + (2*connection_idx+pipe_dir));
+ (struct usb_bam_pipe_connect *)(pdata->connections +
+ bam_offset(pdata) + (2*conn_idx+pipe_dir));
*pipe = sps_alloc_endpoint();
if (*pipe == NULL) {
@@ -105,26 +109,54 @@
*usb_pipe_idx = connection->dest_pipe_index;
}
- ret = sps_setup_bam2bam_fifo(
- &data_mem_buf[connection_idx][pipe_dir],
+ if (!device_tree_enabled) {
+ ret = sps_setup_bam2bam_fifo(
+ &data_mem_buf[conn_idx][pipe_dir],
pipe_connection->data_fifo_base_offset,
pipe_connection->data_fifo_size, 1);
- if (ret) {
- pr_err("%s: data fifo setup failure %d\n", __func__, ret);
- goto fifo_setup_error;
- }
- connection->data = data_mem_buf[connection_idx][pipe_dir];
+ if (ret) {
+ pr_err("%s: data fifo setup failure %d\n", __func__,
+ ret);
+ goto fifo_setup_error;
+ }
- ret = sps_setup_bam2bam_fifo(
- &desc_mem_buf[connection_idx][pipe_dir],
+ ret = sps_setup_bam2bam_fifo(
+ &desc_mem_buf[conn_idx][pipe_dir],
pipe_connection->desc_fifo_base_offset,
pipe_connection->desc_fifo_size, 1);
- if (ret) {
- pr_err("%s: desc. fifo setup failure %d\n", __func__, ret);
- goto fifo_setup_error;
+ if (ret) {
+ pr_err("%s: desc. fifo setup failure %d\n", __func__,
+ ret);
+ goto fifo_setup_error;
+ }
+ } else {
+ data_mem_buf[conn_idx][pipe_dir].phys_base =
+ pipe_connection->data_fifo_base_offset +
+ pdata->usb_base_address;
+ data_mem_buf[conn_idx][pipe_dir].size =
+ pipe_connection->data_fifo_size;
+ data_mem_buf[conn_idx][pipe_dir].base =
+ ioremap(data_mem_buf[conn_idx][pipe_dir].phys_base,
+ data_mem_buf[conn_idx][pipe_dir].size);
+ memset(data_mem_buf[conn_idx][pipe_dir].base, 0,
+ data_mem_buf[conn_idx][pipe_dir].size);
+
+ desc_mem_buf[conn_idx][pipe_dir].phys_base =
+ pipe_connection->desc_fifo_base_offset +
+ pdata->usb_base_address;
+ desc_mem_buf[conn_idx][pipe_dir].size =
+ pipe_connection->desc_fifo_size;
+ desc_mem_buf[conn_idx][pipe_dir].base =
+ ioremap(desc_mem_buf[conn_idx][pipe_dir].phys_base,
+ desc_mem_buf[conn_idx][pipe_dir].size);
+ memset(desc_mem_buf[conn_idx][pipe_dir].base, 0,
+ desc_mem_buf[conn_idx][pipe_dir].size);
}
- connection->desc = desc_mem_buf[connection_idx][pipe_dir];
+
+ connection->data = data_mem_buf[conn_idx][pipe_dir];
+ connection->desc = desc_mem_buf[conn_idx][pipe_dir];
connection->event_thresh = 16;
+ connection->options = SPS_O_AUTO_ENABLE;
ret = sps_connect(*pipe, connection);
if (ret < 0) {
@@ -141,7 +173,22 @@
return ret;
}
-int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx)
+
+static int disconnect_pipe(u8 connection_idx, enum usb_bam_pipe_dir pipe_dir,
+ u32 *usb_pipe_idx)
+{
+ struct sps_pipe *pipe = sps_pipes[connection_idx][pipe_dir];
+ struct sps_connect *connection =
+ &sps_connections[connection_idx][pipe_dir];
+
+ sps_disconnect(pipe);
+ sps_free_endpoint(pipe);
+
+ connection->options &= ~SPS_O_AUTO_ENABLE;
+ return 0;
+}
+
+int usb_bam_connect(u8 idx, u32 *src_pipe_idx, u32 *dst_pipe_idx)
{
struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
int ret;
@@ -153,7 +200,7 @@
}
if (connection->enabled) {
- pr_info("%s: connection %d was already established\n",
+ pr_debug("%s: connection %d was already established\n",
__func__, idx);
return 0;
}
@@ -161,19 +208,23 @@
connection->dst_pipe = dst_pipe_idx;
connection->idx = idx;
- /* open USB -> Peripheral pipe */
- ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
- connection->src_pipe);
- if (ret) {
- pr_err("%s: src pipe connection failure\n", __func__);
- return ret;
+ if (src_pipe_idx) {
+ /* open USB -> Peripheral pipe */
+ ret = connect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
+ connection->src_pipe);
+ if (ret) {
+ pr_err("%s: src pipe connection failure\n", __func__);
+ return ret;
+ }
}
- /* open Peripheral -> USB pipe */
- ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
- connection->dst_pipe);
- if (ret) {
- pr_err("%s: dst pipe connection failure\n", __func__);
- return ret;
+ if (dst_pipe_idx) {
+ /* open Peripheral -> USB pipe */
+ ret = connect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
+ connection->dst_pipe);
+ if (ret) {
+ pr_err("%s: dst pipe connection failure\n", __func__);
+ return ret;
+ }
}
connection->enabled = 1;
@@ -232,19 +283,259 @@
return 0;
}
+int usb_bam_disconnect_pipe(u8 idx)
+{
+ struct usb_bam_connect_info *connection = &usb_bam_connections[idx];
+ int ret;
+
+ if (idx >= CONNECTIONS_NUM) {
+ pr_err("%s: Invalid connection index\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!connection->enabled) {
+ pr_debug("%s: connection %d isn't enabled\n",
+ __func__, idx);
+ return 0;
+ }
+
+ if (connection->src_pipe) {
+ /* close USB -> Peripheral pipe */
+ ret = disconnect_pipe(connection->idx, USB_TO_PEER_PERIPHERAL,
+ connection->src_pipe);
+ if (ret) {
+ pr_err("%s: src pipe connection failure\n", __func__);
+ return ret;
+ }
+
+ }
+ if (connection->dst_pipe) {
+ /* close Peripheral -> USB pipe */
+ ret = disconnect_pipe(connection->idx, PEER_PERIPHERAL_TO_USB,
+ connection->dst_pipe);
+ if (ret) {
+ pr_err("%s: dst pipe connection failure\n", __func__);
+ return ret;
+ }
+ }
+
+ connection->src_pipe = 0;
+ connection->dst_pipe = 0;
+ connection->enabled = 0;
+
+ return 0;
+}
+
+static int update_connections_info(struct device_node *node, int bam,
+ int conn_num, int dir)
+{
+ u32 rc;
+ char *key = NULL;
+ uint32_t val = 0;
+
+ struct usb_bam_pipe_connect *pipe_connection;
+
+ pipe_connection = &msm_usb_bam_connections_info[bam][conn_num][dir];
+
+ key = "qcom,src-bam-physical-address";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->src_phy_addr = val;
+
+ key = "qcom,src-bam-pipe-index";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->src_pipe_index = val;
+
+ key = "qcom,dst-bam-physical-address";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->dst_phy_addr = val;
+
+ key = "qcom,dst-bam-pipe-index";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->dst_pipe_index = val;
+
+ key = "qcom,data-fifo-offset";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->data_fifo_base_offset = val;
+
+ key = "qcom,data-fifo-size";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->data_fifo_size = val;
+
+ key = "qcom,descriptor-fifo-offset";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->desc_fifo_base_offset = val;
+
+ key = "qcom,descriptor-fifo-size";
+ rc = of_property_read_u32(node, key, &val);
+ if (rc)
+ goto err;
+ pipe_connection->desc_fifo_size = val;
+
+ return 0;
+
+err:
+ pr_err("%s: Error in name %s key %s\n", __func__,
+ node->full_name, key);
+ return -EFAULT;
+}
+
+static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
+ struct platform_device *pdev)
+{
+ struct msm_usb_bam_platform_data *pdata;
+ struct device_node *node = pdev->dev.of_node;
+ u32 i, j;
+ int conn_num, bam;
+ u8 dir;
+ u8 ncolumns = 2;
+ int bam_amount, rc = 0;
+ u32 pipe_entry = 0;
+ char *key = NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("unable to allocate platform data\n");
+ return NULL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,usb-active-bam",
+ &pdata->usb_active_bam);
+ if (rc) {
+ pr_err("Invalid usb active bam property\n");
+ return NULL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,usb-total-bam-num",
+ &pdata->total_bam_num);
+ if (rc) {
+ pr_err("Invalid usb total bam num property\n");
+ return NULL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,usb-bam-num-pipes",
+ &pdata->usb_bam_num_pipes);
+ if (rc) {
+ pr_err("Invalid usb bam num pipes property\n");
+ return NULL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,usb-base-address",
+ &pdata->usb_base_address);
+ if (rc) {
+ pr_err("Invalid usb base address property\n");
+ return NULL;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, node)
+ pipe_entry++;
+
+	/*
+	 * we need to know the number of connections, so we know
+	 * how much memory to allocate
+	 */
+ conn_num = pipe_entry / 2;
+ bam_amount = pdata->total_bam_num;
+
+ if (conn_num > 0 && conn_num < pdata->usb_bam_num_pipes) {
+ /* alloc msm_usb_bam_connections_info */
+ bam_connection_arr = devm_kzalloc(&pdev->dev, bam_amount *
+ conn_num * ncolumns *
+ sizeof(struct usb_bam_pipe_connect), GFP_KERNEL);
+
+ if (!bam_connection_arr)
+ goto err;
+
+ msm_usb_bam_connections_info = devm_kzalloc(&pdev->dev,
+ bam_amount * sizeof(struct usb_bam_pipe_connect **),
+ GFP_KERNEL);
+
+ if (!msm_usb_bam_connections_info)
+ goto err;
+
+ for (j = 0; j < bam_amount; j++) {
+ msm_usb_bam_connections_info[j] =
+ devm_kzalloc(&pdev->dev, conn_num *
+ sizeof(struct usb_bam_pipe_connect *),
+ GFP_KERNEL);
+ for (i = 0; i < conn_num; i++)
+ msm_usb_bam_connections_info[j][i] =
+ bam_connection_arr +
+ (j * conn_num * ncolumns) +
+ (i * ncolumns);
+ }
+
+ /* retrieve device tree parameters */
+ for_each_child_of_node(pdev->dev.of_node, node) {
+ const char *str;
+
+ key = "qcom,usb-bam-type";
+ rc = of_property_read_u32(node, key, &bam);
+ if (rc)
+ goto err;
+
+ rc = of_property_read_string(node, "label", &str);
+ if (rc) {
+ pr_err("Cannot read string\n");
+ goto err;
+ }
+
+ if (strstr(str, "usb-to-peri"))
+ dir = USB_TO_PEER_PERIPHERAL;
+ else if (strstr(str, "peri-to-usb"))
+ dir = PEER_PERIPHERAL_TO_USB;
+ else
+ goto err;
+
+ if (!strcmp(str, "usb-to-peri-qdss-dwc3") ||
+ !strcmp(str, "peri-to-usb-qdss-dwc3"))
+ conn_num = 0;
+ else
+ goto err;
+
+ rc = update_connections_info(node, bam, conn_num, dir);
+ if (rc)
+ goto err;
+ }
+
+ pdata->connections = &msm_usb_bam_connections_info[0][0][0];
+
+ } else {
+ goto err;
+ }
+
+ return pdata;
+err:
+ pr_err("%s: failed\n", __func__);
+ return NULL;
+}
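+
+/*
+ * Sketch only (the helper name is illustrative, not part of this patch):
+ * the pointer tables built above make
+ * msm_usb_bam_connections_info[bam][conn][dir] resolve to the flat
+ * bam_connection_arr entry below -- two directions per connection,
+ * conn_num connections per BAM.
+ */
+static inline struct usb_bam_pipe_connect *usb_bam_conn_entry(
+	int bam, int conn_num, int conn, int dir)
+{
+	return bam_connection_arr + ((bam * conn_num + conn) * 2) + dir;
+}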
+
static int usb_bam_init(void)
{
u32 h_usb;
int ret;
void *usb_virt_addr;
struct msm_usb_bam_platform_data *pdata =
- (struct msm_usb_bam_platform_data *)
- (usb_bam_pdev->dev.platform_data);
+ usb_bam_pdev->dev.platform_data;
struct resource *res;
int irq;
res = platform_get_resource(usb_bam_pdev, IORESOURCE_MEM,
- pdata->usb_active_bam);
+ pdata->usb_active_bam);
if (!res) {
dev_err(&usb_bam_pdev->dev, "Unable to get memory resource\n");
return -ENODEV;
@@ -266,6 +557,7 @@
usb_props.virt_size = resource_size(res);
usb_props.irq = irq;
usb_props.summing_threshold = USB_SUMMING_THRESHOLD;
+ usb_props.event_threshold = 512;
usb_props.num_pipes = pdata->usb_bam_num_pipes;
ret = sps_register_bam_device(&usb_props, &h_usb);
@@ -286,11 +578,10 @@
usb_bam_show_enable(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
+ struct platform_device *pdev =
+ container_of(dev, struct platform_device, dev);
struct msm_usb_bam_platform_data *pdata =
- (struct msm_usb_bam_platform_data *)
- (usb_bam_pdev->dev.platform_data);
+ usb_bam_pdev->dev.platform_data;
if (!pdev || !pdata)
return 0;
@@ -302,11 +593,10 @@
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
struct msm_usb_bam_platform_data *pdata =
- (struct msm_usb_bam_platform_data *)
- (usb_bam_pdev->dev.platform_data);
+ usb_bam_pdev->dev.platform_data;
char str[10], *pstr;
int ret, i;
@@ -336,6 +626,7 @@
static int usb_bam_probe(struct platform_device *pdev)
{
int ret, i;
+ struct msm_usb_bam_platform_data *pdata;
dev_dbg(&pdev->dev, "usb_bam_probe\n");
@@ -345,9 +636,19 @@
usb_bam_wake_work);
}
- if (!pdev->dev.platform_data) {
+ if (pdev->dev.of_node) {
+ dev_dbg(&pdev->dev, "device tree enabled\n");
+ device_tree_enabled = 1;
+ pdata = usb_bam_dt_to_pdata(pdev);
+ if (!pdata)
+ return -ENOMEM;
+ pdev->dev.platform_data = pdata;
+ } else if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "missing platform_data\n");
return -ENODEV;
+ } else {
+ pdata = pdev->dev.platform_data;
+ device_tree_enabled = 0;
}
usb_bam_pdev = pdev;
@@ -365,6 +666,32 @@
return ret;
}
+void get_bam2bam_connection_info(u8 conn_idx, enum usb_bam_pipe_dir pipe_dir,
+ u32 *usb_bam_handle, u32 *usb_bam_pipe_idx, u32 *peer_pipe_idx,
+ struct sps_mem_buffer *desc_fifo, struct sps_mem_buffer *data_fifo)
+{
+ struct sps_connect *connection =
+ &sps_connections[conn_idx][pipe_dir];
+
+ if (pipe_dir == USB_TO_PEER_PERIPHERAL) {
+ *usb_bam_handle = connection->source;
+ *usb_bam_pipe_idx = connection->src_pipe_index;
+ *peer_pipe_idx = connection->dest_pipe_index;
+ } else {
+ *usb_bam_handle = connection->destination;
+ *usb_bam_pipe_idx = connection->dest_pipe_index;
+ *peer_pipe_idx = connection->src_pipe_index;
+ }
+ if (data_fifo)
+ memcpy(data_fifo, &data_mem_buf[conn_idx][pipe_dir],
+ sizeof(struct sps_mem_buffer));
+ if (desc_fifo)
+ memcpy(desc_fifo, &desc_mem_buf[conn_idx][pipe_dir],
+ sizeof(struct sps_mem_buffer));
+}
+EXPORT_SYMBOL(get_bam2bam_connection_info);
+
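+/*
+ * Sketch of a typical caller (not part of this patch): a peripheral-side
+ * driver that has already set up a connection with usb_bam_connect() can
+ * retrieve the SPS handle, pipe indexes and FIFO buffers for one direction
+ * as follows:
+ *
+ *	u32 bam_handle, bam_pipe, peer_pipe;
+ *	struct sps_mem_buffer desc_fifo, data_fifo;
+ *
+ *	get_bam2bam_connection_info(conn_idx, USB_TO_PEER_PERIPHERAL,
+ *			&bam_handle, &bam_pipe, &peer_pipe,
+ *			&desc_fifo, &data_fifo);
+ */
+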
static int usb_bam_remove(struct platform_device *pdev)
{
destroy_workqueue(usb_bam_wq);
@@ -372,10 +699,20 @@
return 0;
}
+static const struct of_device_id usb_bam_dt_match[] = {
+ { .compatible = "qcom,usb-bam-msm",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, usb_bam_dt_match);
+
static struct platform_driver usb_bam_driver = {
.probe = usb_bam_probe,
.remove = usb_bam_remove,
- .driver = { .name = "usb_bam", },
+ .driver = {
+ .name = "usb_bam",
+ .of_match_table = usb_bam_dt_match,
+ },
};
static int __init init(void)
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index 1fade88..a9e5d91 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -98,8 +98,8 @@
struct usb_request *rx_req;
struct usb_request *tx_req;
- u8 src_pipe_idx;
- u8 dst_pipe_idx;
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
u8 connection_idx;
/* stats */
diff --git a/drivers/usb/gadget/u_bam_data.c b/drivers/usb/gadget/u_bam_data.c
index 73b4e75..a105f5d 100644
--- a/drivers/usb/gadget/u_bam_data.c
+++ b/drivers/usb/gadget/u_bam_data.c
@@ -50,8 +50,8 @@
struct usb_request *rx_req;
struct usb_request *tx_req;
- u8 src_pipe_idx;
- u8 dst_pipe_idx;
+ u32 src_pipe_idx;
+ u32 dst_pipe_idx;
u8 connection_idx;
};
diff --git a/drivers/usb/host/ehci-msm2.c b/drivers/usb/host/ehci-msm2.c
index 8a87a6a..c612cb9 100644
--- a/drivers/usb/host/ehci-msm2.c
+++ b/drivers/usb/host/ehci-msm2.c
@@ -54,6 +54,10 @@
bool async_int;
bool vbus_on;
atomic_t in_lpm;
+ int pmic_gpio_dp_irq;
+ bool pmic_gpio_dp_irq_enabled;
+ uint32_t pmic_gpio_int_cnt;
+ atomic_t pm_usage_cnt;
struct wake_lock wlock;
};
@@ -603,6 +607,11 @@
atomic_set(&mhcd->in_lpm, 1);
enable_irq(hcd->irq);
+ if (mhcd->pmic_gpio_dp_irq) {
+ mhcd->pmic_gpio_dp_irq_enabled = 1;
+ enable_irq_wake(mhcd->pmic_gpio_dp_irq);
+ enable_irq(mhcd->pmic_gpio_dp_irq);
+ }
wake_unlock(&mhcd->wlock);
dev_info(mhcd->dev, "EHCI USB in low power mode\n");
@@ -622,6 +631,11 @@
return 0;
}
+ if (mhcd->pmic_gpio_dp_irq_enabled) {
+ disable_irq_wake(mhcd->pmic_gpio_dp_irq);
+ disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
+ mhcd->pmic_gpio_dp_irq_enabled = 0;
+ }
wake_lock(&mhcd->wlock);
/* Vote for TCXO when waking up the phy */
@@ -669,6 +683,11 @@
enable_irq(hcd->irq);
}
+ if (atomic_read(&mhcd->pm_usage_cnt)) {
+ atomic_set(&mhcd->pm_usage_cnt, 0);
+ pm_runtime_put_noidle(mhcd->dev);
+ }
+
dev_info(mhcd->dev, "EHCI USB exited from low power mode\n");
return 0;
@@ -689,6 +708,32 @@
return ehci_irq(hcd);
}
+static irqreturn_t msm_ehci_host_wakeup_irq(int irq, void *data)
+{
+ struct msm_hcd *mhcd = data;
+
+ mhcd->pmic_gpio_int_cnt++;
+ dev_dbg(mhcd->dev, "%s: hsusb host remote wakeup interrupt cnt: %u\n",
+ __func__, mhcd->pmic_gpio_int_cnt);
+
+ wake_lock(&mhcd->wlock);
+
+ if (mhcd->pmic_gpio_dp_irq_enabled) {
+ mhcd->pmic_gpio_dp_irq_enabled = 0;
+ disable_irq_wake(irq);
+ disable_irq_nosync(irq);
+ }
+
+ if (!atomic_read(&mhcd->pm_usage_cnt)) {
+ atomic_set(&mhcd->pm_usage_cnt, 1);
+ pm_runtime_get(mhcd->dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int msm_ehci_reset(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -952,6 +997,22 @@
* hence, runtime framework automatically calls this driver's
* runtime APIs based on root-hub's state.
*/
+ /* configure pmic_gpio_irq for D+ change */
+ if (pdata && pdata->pmic_gpio_dp_irq)
+ mhcd->pmic_gpio_dp_irq = pdata->pmic_gpio_dp_irq;
+ if (mhcd->pmic_gpio_dp_irq) {
+ ret = request_threaded_irq(mhcd->pmic_gpio_dp_irq, NULL,
+ msm_ehci_host_wakeup_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "msm_ehci_host_wakeup", mhcd);
+ if (!ret) {
+ disable_irq_nosync(mhcd->pmic_gpio_dp_irq);
+ } else {
+ dev_err(&pdev->dev, "request_irq(%d) failed: %d\n",
+ mhcd->pmic_gpio_dp_irq, ret);
+ mhcd->pmic_gpio_dp_irq = 0;
+ }
+ }
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -984,6 +1045,11 @@
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct msm_hcd *mhcd = hcd_to_mhcd(hcd);
+ if (mhcd->pmic_gpio_dp_irq) {
+ if (mhcd->pmic_gpio_dp_irq_enabled)
+ disable_irq_wake(mhcd->pmic_gpio_dp_irq);
+ free_irq(mhcd->pmic_gpio_dp_irq, mhcd);
+ }
device_init_wakeup(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 13828e0..3aa2e5c 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -293,3 +293,13 @@
driver for dial up network and RMNET.
To compile this driver as a module, choose M here: the module
will be called mdm_bridge. If unsure, choose N.
+
+config USB_QCOM_KS_BRIDGE
+ tristate "USB Qualcomm kick start bridge"
+ depends on USB
+ help
+ Say Y here if you have a Qualcomm modem device connected via USB that
+ will be bridged in kernel space. This driver works as a bridge to pass
+	  boot images, RAM dumps and EFS sync data.
+ To compile this driver as a module, choose M here: the module
+ will be called ks_bridge. If unsure, choose N.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index b4aee65..447e4d2 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -33,3 +33,4 @@
obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE_TEST) += diag_bridge_test.o
mdm_bridge-y := mdm_ctrl_bridge.o mdm_data_bridge.o
obj-$(CONFIG_USB_QCOM_MDM_BRIDGE) += mdm_bridge.o
+obj-$(CONFIG_USB_QCOM_KS_BRIDGE) += ks_bridge.o
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
new file mode 100644
index 0000000..10cbe59
--- /dev/null
+++ b/drivers/usb/misc/ks_bridge.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* add additional information to our printk's */
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#define DRIVER_DESC "USB host ks bridge driver"
+#define DRIVER_VERSION "1.0"
+
+struct data_pkt {
+ int n_read;
+ char *buf;
+ size_t len;
+ struct list_head list;
+ void *ctxt;
+};
+
+#define FILE_OPENED BIT(0)
+#define USB_DEV_CONNECTED BIT(1)
+#define NO_RX_REQS 10
+#define NO_BRIDGE_INSTANCES 2
+#define BOOT_BRIDGE_INDEX 0
+#define EFS_BRIDGE_INDEX 1
+#define MAX_DATA_PKT_SIZE 16384
+
+struct ks_bridge {
+ char *name;
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ struct work_struct to_mdm_work;
+ struct work_struct start_rx_work;
+ struct list_head to_mdm_list;
+ struct list_head to_ks_list;
+ wait_queue_head_t ks_wait_q;
+
+ /* usb specific */
+ struct usb_device *udev;
+ struct usb_interface *ifc;
+ __u8 in_epAddr;
+ __u8 out_epAddr;
+ unsigned int in_pipe;
+ unsigned int out_pipe;
+ struct usb_anchor submitted;
+
+ unsigned long flags;
+ unsigned int alloced_read_pkts;
+
+#define DBG_MSG_LEN 40
+#define DBG_MAX_MSG 500
+ unsigned int dbg_idx;
+ rwlock_t dbg_lock;
+ char (dbgbuf[DBG_MAX_MSG])[DBG_MSG_LEN]; /* buffer */
+};
+struct ks_bridge *__ksb[NO_BRIDGE_INSTANCES];
+
+/* by default debugging is enabled */
+static unsigned int enable_dbg = 1;
+module_param(enable_dbg, uint, S_IRUGO | S_IWUSR);
+
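+/* Record a timestamped event in the per-bridge circular debug buffer */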
+static void
+dbg_log_event(struct ks_bridge *ksb, char *event, int d1, int d2)
+{
+ unsigned long flags;
+ unsigned long long t;
+ unsigned long nanosec;
+
+ if (!enable_dbg)
+ return;
+
+ write_lock_irqsave(&ksb->dbg_lock, flags);
+ t = cpu_clock(smp_processor_id());
+ nanosec = do_div(t, 1000000000)/1000;
+ scnprintf(ksb->dbgbuf[ksb->dbg_idx], DBG_MSG_LEN, "%5lu.%06lu:%s:%x:%x",
+ (unsigned long)t, nanosec, event, d1, d2);
+
+ ksb->dbg_idx++;
+ ksb->dbg_idx = ksb->dbg_idx % DBG_MAX_MSG;
+ write_unlock_irqrestore(&ksb->dbg_lock, flags);
+}
+
+static
+struct data_pkt *ksb_alloc_data_pkt(size_t count, gfp_t flags, void *ctxt)
+{
+ struct data_pkt *pkt;
+
+ pkt = kzalloc(sizeof(struct data_pkt), flags);
+ if (!pkt) {
+ pr_err("failed to allocate data packet\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->buf = kmalloc(count, flags);
+ if (!pkt->buf) {
+ pr_err("failed to allocate data buffer\n");
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->len = count;
+ INIT_LIST_HEAD(&pkt->list);
+ pkt->ctxt = ctxt;
+
+ return pkt;
+}
+
+static void ksb_free_data_pkt(struct data_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+
+
+static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int ret;
+ unsigned long flags;
+ struct ks_bridge *ksb = fp->private_data;
+ struct data_pkt *pkt;
+ size_t space, copied;
+
+read_start:
+ if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ if (list_empty(&ksb->to_ks_list)) {
+ spin_unlock_irqrestore(&ksb->lock, flags);
+ ret = wait_event_interruptible(ksb->ks_wait_q,
+ !list_empty(&ksb->to_ks_list) ||
+ !test_bit(USB_DEV_CONNECTED, &ksb->flags));
+ if (ret < 0)
+ return ret;
+
+ goto read_start;
+ }
+
+ space = count;
+ copied = 0;
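+	/*
+	 * Drain queued packets until the user buffer is full; a partially
+	 * consumed packet stays on to_ks_list with n_read tracking progress.
+	 */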
+ while (!list_empty(&ksb->to_ks_list) && space) {
+ size_t len;
+
+ pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
+		len = min_t(size_t, space, pkt->len - pkt->n_read);
+		spin_unlock_irqrestore(&ksb->lock, flags);
+
+		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
+		if (ret) {
+			pr_err("copy_to_user failed err:%d\n", ret);
+			spin_lock_irqsave(&ksb->lock, flags);
+			list_del_init(&pkt->list);
+			spin_unlock_irqrestore(&ksb->lock, flags);
+			ksb_free_data_pkt(pkt);
+			ksb->alloced_read_pkts--;
+			return -EFAULT;
+		}
+
+		pkt->n_read += len;
+		space -= len;
+		copied += len;
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ if (pkt->n_read == pkt->len) {
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ ksb->alloced_read_pkts--;
+ }
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ dbg_log_event(ksb, "KS_READ", copied, 0);
+
+	pr_debug("count:%zu space:%zu copied:%zu", count, space, copied);
+
+ return copied;
+}
+
+static void ksb_tx_cb(struct urb *urb)
+{
+ struct data_pkt *pkt = urb->context;
+ struct ks_bridge *ksb = pkt->ctxt;
+
+ dbg_log_event(ksb, "C TX_URB", urb->status, 0);
+ pr_debug("status:%d", urb->status);
+
+ if (ksb->ifc)
+ usb_autopm_put_interface_async(ksb->ifc);
+
+ if (urb->status < 0)
+ pr_err_ratelimited("urb failed with err:%d", urb->status);
+
+ ksb_free_data_pkt(pkt);
+}
+
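+/*
+ * Work item: push queued user writes to the device over the bulk OUT
+ * pipe, holding an autopm reference across each URB submission.
+ */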
+static void ksb_tomdm_work(struct work_struct *w)
+{
+ struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
+ struct data_pkt *pkt;
+ unsigned long flags;
+ struct urb *urb;
+ int ret;
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_mdm_list)
+ && test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ pr_err_ratelimited("unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
+ ret = usb_autopm_get_interface(ksb->ifc);
+ if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+ pr_err_ratelimited("autopm_get failed:%d", ret);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe,
+ pkt->buf, pkt->len, ksb_tx_cb, pkt);
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ pr_err("out urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ usb_autopm_put_interface(ksb->ifc);
+ return;
+ }
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+}
+
+static ssize_t ksb_fs_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int ret;
+ struct data_pkt *pkt;
+ unsigned long flags;
+ struct ks_bridge *ksb = fp->private_data;
+
+ pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
+ if (IS_ERR(pkt)) {
+ pr_err("unable to allocate data packet");
+ return PTR_ERR(pkt);
+ }
+
+ ret = copy_from_user(pkt->buf, buf, count);
+ if (ret) {
+ pr_err("copy_from_user failed: err:%d", ret);
+ ksb_free_data_pkt(pkt);
+		return -EFAULT;
+ }
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ list_add_tail(&pkt->list, &ksb->to_mdm_list);
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ queue_work(ksb->wq, &ksb->to_mdm_work);
+
+ return count;
+}
+
+static int efs_fs_open(struct inode *ip, struct file *fp)
+{
+ struct ks_bridge *ksb = __ksb[EFS_BRIDGE_INDEX];
+
+	if (!ksb) {
+		pr_err("ksb is being removed");
+		return -ENODEV;
+	}
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "EFS-FS-OPEN", 0, 0);
+
+ fp->private_data = ksb;
+ set_bit(FILE_OPENED, &ksb->flags);
+
+ if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ queue_work(ksb->wq, &ksb->start_rx_work);
+
+ return 0;
+}
+
+static int ksb_fs_open(struct inode *ip, struct file *fp)
+{
+ struct ks_bridge *ksb = __ksb[BOOT_BRIDGE_INDEX];
+
+	if (!ksb) {
+		pr_err("ksb is being removed");
+		return -ENODEV;
+	}
+
+	pr_debug(":%s", ksb->name);
+	dbg_log_event(ksb, "KS-FS-OPEN", 0, 0);
+
+ fp->private_data = ksb;
+ set_bit(FILE_OPENED, &ksb->flags);
+
+ if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+ queue_work(ksb->wq, &ksb->start_rx_work);
+
+ return 0;
+}
+
+static int ksb_fs_release(struct inode *ip, struct file *fp)
+{
+ struct ks_bridge *ksb = fp->private_data;
+
+ pr_debug(":%s", ksb->name);
+ dbg_log_event(ksb, "FS-RELEASE", 0, 0);
+
+ clear_bit(FILE_OPENED, &ksb->flags);
+ fp->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations ksb_fops = {
+ .owner = THIS_MODULE,
+ .read = ksb_fs_read,
+ .write = ksb_fs_write,
+ .open = ksb_fs_open,
+ .release = ksb_fs_release,
+};
+
+static struct miscdevice ksb_fboot_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ks_bridge",
+ .fops = &ksb_fops,
+};
+
+static const struct file_operations efs_fops = {
+ .owner = THIS_MODULE,
+ .read = ksb_fs_read,
+ .write = ksb_fs_write,
+ .open = efs_fs_open,
+ .release = ksb_fs_release,
+};
+
+static struct miscdevice ksb_efs_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "efs_bridge",
+ .fops = &efs_fops,
+};
+
+static const struct usb_device_id ksb_usb_ids[] = {
+ { USB_DEVICE(0x5c6, 0x9008),
+ .driver_info = (unsigned long)&ksb_fboot_dev, },
+ { USB_DEVICE(0x5c6, 0x9048),
+ .driver_info = (unsigned long)&ksb_efs_dev, },
+ { USB_DEVICE(0x5c6, 0x904C),
+ .driver_info = (unsigned long)&ksb_efs_dev, },
+
+ {} /* terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, ksb_usb_ids);
+
+static void ksb_rx_cb(struct urb *urb);
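+/* Allocate and submit a single bulk IN URB to keep the read pipeline full */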
+static void submit_one_urb(struct ks_bridge *ksb)
+{
+ struct data_pkt *pkt;
+ struct urb *urb;
+ int ret;
+
+ pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_ATOMIC, ksb);
+ if (IS_ERR(pkt)) {
+ pr_err("unable to allocate data pkt");
+ return;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ pr_err("unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+ ksb->alloced_read_pkts++;
+
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt);
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ pr_err("in urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ ksb->alloced_read_pkts--;
+ return;
+ }
+
+ usb_free_urb(urb);
+}
+static void ksb_rx_cb(struct urb *urb)
+{
+ struct data_pkt *pkt = urb->context;
+ struct ks_bridge *ksb = pkt->ctxt;
+
+ dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
+
+ pr_debug("status:%d actual:%d", urb->status, urb->actual_length);
+
+ if (urb->status < 0) {
+ if (urb->status != -ESHUTDOWN && urb->status != -ENOENT)
+ pr_err_ratelimited("urb failed with err:%d",
+ urb->status);
+ ksb_free_data_pkt(pkt);
+ ksb->alloced_read_pkts--;
+ return;
+ }
+
+ if (urb->actual_length == 0) {
+ ksb_free_data_pkt(pkt);
+ ksb->alloced_read_pkts--;
+ goto resubmit_urb;
+ }
+
+ spin_lock(&ksb->lock);
+ pkt->len = urb->actual_length;
+ list_add_tail(&pkt->list, &ksb->to_ks_list);
+ spin_unlock(&ksb->lock);
+
+ /* wake up read thread */
+ wake_up(&ksb->ks_wait_q);
+
+resubmit_urb:
+ submit_one_urb(ksb);
+
+}
+
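+/* Queue NO_RX_REQS bulk IN URBs to start receiving data from the device */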
+static void ksb_start_rx_work(struct work_struct *w)
+{
+ struct ks_bridge *ksb =
+ container_of(w, struct ks_bridge, start_rx_work);
+ struct data_pkt *pkt;
+ struct urb *urb;
+ int i = 0;
+ int ret;
+
+ for (i = 0; i < NO_RX_REQS; i++) {
+ pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
+ if (IS_ERR(pkt)) {
+ pr_err("unable to allocate data pkt");
+ return;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ pr_err("unable to allocate urb");
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+
+ ret = usb_autopm_get_interface(ksb->ifc);
+ if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
+ pr_err_ratelimited("autopm_get failed:%d", ret);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ return;
+ }
+ ksb->alloced_read_pkts++;
+
+ usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe,
+ pkt->buf, pkt->len,
+ ksb_rx_cb, pkt);
+ usb_anchor_urb(urb, &ksb->submitted);
+
+ dbg_log_event(ksb, "S RX_URB", pkt->len, 0);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ pr_err("in urb submission failed");
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ksb_free_data_pkt(pkt);
+ ksb->alloced_read_pkts--;
+ usb_autopm_put_interface(ksb->ifc);
+ return;
+ }
+
+ usb_autopm_put_interface_async(ksb->ifc);
+ usb_free_urb(urb);
+ }
+}
+
+static int
+ksb_usb_probe(struct usb_interface *ifc, const struct usb_device_id *id)
+{
+ __u8 ifc_num;
+ struct usb_host_interface *ifc_desc;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i;
+ struct ks_bridge *ksb;
+ struct miscdevice *fs_dev;
+
+ ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;
+
+ switch (id->idProduct) {
+ case 0x9008:
+ if (ifc_num != 0)
+ return -ENODEV;
+ ksb = __ksb[BOOT_BRIDGE_INDEX];
+ break;
+ case 0x9048:
+ case 0x904C:
+ if (ifc_num != 2)
+ return -ENODEV;
+ ksb = __ksb[EFS_BRIDGE_INDEX];
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (!ksb) {
+ pr_err("ksb is not initialized");
+ return -ENODEV;
+ }
+
+ ksb->udev = usb_get_dev(interface_to_usbdev(ifc));
+ ksb->ifc = ifc;
+ ifc_desc = ifc->cur_altsetting;
+
+ for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &ifc_desc->endpoint[i].desc;
+
+ if (!ksb->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
+ ksb->in_epAddr = ep_desc->bEndpointAddress;
+
+ if (!ksb->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
+ ksb->out_epAddr = ep_desc->bEndpointAddress;
+ }
+
+ if (!(ksb->in_epAddr && ksb->out_epAddr)) {
+ pr_err("could not find bulk in and bulk out endpoints");
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+ return -ENODEV;
+ }
+
+ ksb->in_pipe = usb_rcvbulkpipe(ksb->udev, ksb->in_epAddr);
+ ksb->out_pipe = usb_sndbulkpipe(ksb->udev, ksb->out_epAddr);
+
+ usb_set_intfdata(ifc, ksb);
+ set_bit(USB_DEV_CONNECTED, &ksb->flags);
+
+ dbg_log_event(ksb, "PID-ATT", id->idProduct, 0);
+
+ fs_dev = (struct miscdevice *)id->driver_info;
+ misc_register(fs_dev);
+
+ usb_enable_autosuspend(ksb->udev);
+
+ pr_debug("usb dev connected");
+
+ return 0;
+}
+
+static int ksb_usb_suspend(struct usb_interface *ifc, pm_message_t message)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+ dbg_log_event(ksb, "SUSPEND", 0, 0);
+
+	pr_info("read cnt: %u", ksb->alloced_read_pkts);
+
+ usb_kill_anchored_urbs(&ksb->submitted);
+
+ return 0;
+}
+
+static int ksb_usb_resume(struct usb_interface *ifc)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+
+ dbg_log_event(ksb, "RESUME", 0, 0);
+
+ if (test_bit(FILE_OPENED, &ksb->flags))
+ queue_work(ksb->wq, &ksb->start_rx_work);
+
+ return 0;
+}
+
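+/*
+ * Disconnect: wake any blocked reader, cancel pending work, kill
+ * outstanding URBs and free all queued packets.
+ */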
+static void ksb_usb_disconnect(struct usb_interface *ifc)
+{
+ struct ks_bridge *ksb = usb_get_intfdata(ifc);
+ unsigned long flags;
+ struct data_pkt *pkt;
+
+ dbg_log_event(ksb, "PID-DETACH", 0, 0);
+
+ clear_bit(USB_DEV_CONNECTED, &ksb->flags);
+ wake_up(&ksb->ks_wait_q);
+ cancel_work_sync(&ksb->to_mdm_work);
+
+ usb_kill_anchored_urbs(&ksb->submitted);
+
+ spin_lock_irqsave(&ksb->lock, flags);
+ while (!list_empty(&ksb->to_ks_list)) {
+ pkt = list_first_entry(&ksb->to_ks_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ while (!list_empty(&ksb->to_mdm_list)) {
+ pkt = list_first_entry(&ksb->to_mdm_list,
+ struct data_pkt, list);
+ list_del_init(&pkt->list);
+ ksb_free_data_pkt(pkt);
+ }
+ spin_unlock_irqrestore(&ksb->lock, flags);
+
+ usb_put_dev(ksb->udev);
+ ksb->ifc = NULL;
+ usb_set_intfdata(ifc, NULL);
+
+ return;
+}
+
+static struct usb_driver ksb_usb_driver = {
+ .name = "ks_bridge",
+ .probe = ksb_usb_probe,
+ .disconnect = ksb_usb_disconnect,
+ .suspend = ksb_usb_suspend,
+ .resume = ksb_usb_resume,
+ .id_table = ksb_usb_ids,
+ .supports_autosuspend = 1,
+};
+
+static int ksb_debug_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct ks_bridge *ksb = s->private;
+ int i;
+
+ read_lock_irqsave(&ksb->dbg_lock, flags);
+ for (i = 0; i < DBG_MAX_MSG; i++) {
+ if (i == (ksb->dbg_idx - 1))
+ seq_printf(s, "-->%s\n", ksb->dbgbuf[i]);
+ else
+ seq_printf(s, "%s\n", ksb->dbgbuf[i]);
+ }
+ read_unlock_irqrestore(&ksb->dbg_lock, flags);
+
+ return 0;
+}
+
+static int ksb_debug_open(struct inode *ip, struct file *fp)
+{
+	return single_open(fp, ksb_debug_show, ip->i_private);
+}
+
+static const struct file_operations dbg_fops = {
+ .open = ksb_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static struct dentry *dbg_dir;
+static int __init ksb_init(void)
+{
+ struct ks_bridge *ksb;
+ int num_instances = 0;
+ int ret = 0;
+ int i;
+
+ dbg_dir = debugfs_create_dir("ks_bridge", NULL);
+ if (IS_ERR(dbg_dir))
+ pr_err("unable to create debug dir");
+
+ for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+ ksb = kzalloc(sizeof(struct ks_bridge), GFP_KERNEL);
+		if (!ksb) {
+			pr_err("unable to allocate mem for ks_bridge");
+			ret = -ENOMEM;
+			goto dev_free;
+		}
+ __ksb[i] = ksb;
+
+ ksb->name = kasprintf(GFP_KERNEL, "ks_bridge:%i", i + 1);
+ if (!ksb->name) {
+			pr_err("unable to allocate name");
+ kfree(ksb);
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+
+ spin_lock_init(&ksb->lock);
+ INIT_LIST_HEAD(&ksb->to_mdm_list);
+ INIT_LIST_HEAD(&ksb->to_ks_list);
+ init_waitqueue_head(&ksb->ks_wait_q);
+ ksb->wq = create_singlethread_workqueue(ksb->name);
+ if (!ksb->wq) {
+ pr_err("unable to allocate workqueue");
+ kfree(ksb->name);
+ kfree(ksb);
+ ret = -ENOMEM;
+ goto dev_free;
+ }
+
+ INIT_WORK(&ksb->to_mdm_work, ksb_tomdm_work);
+ INIT_WORK(&ksb->start_rx_work, ksb_start_rx_work);
+ init_usb_anchor(&ksb->submitted);
+
+ ksb->dbg_idx = 0;
+ ksb->dbg_lock = __RW_LOCK_UNLOCKED(lck);
+
+ if (!IS_ERR(dbg_dir))
+ debugfs_create_file(ksb->name, S_IRUGO, dbg_dir,
+ ksb, &dbg_fops);
+
+ num_instances++;
+ }
+
+ ret = usb_register(&ksb_usb_driver);
+ if (ret) {
+ pr_err("unable to register ks bridge driver");
+ goto dev_free;
+ }
+
+ pr_info("init done");
+
+ return 0;
+
+dev_free:
+ if (!IS_ERR(dbg_dir))
+ debugfs_remove_recursive(dbg_dir);
+
+ for (i = 0; i < num_instances; i++) {
+ ksb = __ksb[i];
+
+ destroy_workqueue(ksb->wq);
+ kfree(ksb->name);
+ kfree(ksb);
+ }
+
+ return ret;
+
+}
+
+static void __exit ksb_exit(void)
+{
+ struct ks_bridge *ksb;
+ int i;
+
+ if (!IS_ERR(dbg_dir))
+ debugfs_remove_recursive(dbg_dir);
+
+ usb_deregister(&ksb_usb_driver);
+
+ for (i = 0; i < NO_BRIDGE_INSTANCES; i++) {
+ ksb = __ksb[i];
+
+ destroy_workqueue(ksb->wq);
+ kfree(ksb->name);
+ kfree(ksb);
+ }
+}
+
+module_init(ksb_init);
+module_exit(ksb_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 8e6f347..c397f84 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1432,11 +1432,9 @@
outpdw(MDP_BASE + 0x0004, 0);
} else if (term == MDP_OVERLAY1_TERM) {
mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- mdp_lut_enable();
outpdw(MDP_BASE + 0x0008, 0);
} else if (term == MDP_OVERLAY2_TERM) {
mdp_pipe_ctrl(MDP_OVERLAY2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- mdp_lut_enable();
outpdw(MDP_BASE + 0x00D0, 0);
}
#else
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 413b239..75d92f1 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1817,7 +1817,13 @@
op_mode &= ~(MDP4_OP_FLIP_UD + MDP4_OP_SCALEY_EN);
outpdw(base + 0x0058, op_mode);
outpdw(base + 0x1008, 0); /* black */
+ /*
+ * Set src size and dst size same to avoid underruns
+ */
+ outpdw(base + 0x0000, inpdw(base + 0x0008));
} else {
+ u32 src_size = ((pipe->src_h << 16) | pipe->src_w);
+ outpdw(base + 0x0000, src_size);
format &= ~MDP4_FORMAT_SOLID_FILL;
blend->solidfill_pipe = NULL;
}
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index 398b1e6..16c5278 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -513,6 +513,8 @@
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
+ pipe->dst_h = fbi->var.yres;
+ pipe->dst_w = fbi->var.xres;
pipe->srcp0_ystride = fbi->fix.line_length;
pipe->bpp = bpp;
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 57a07d0..f857ac8 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -700,6 +700,8 @@
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
+ pipe->dst_h = fbi->var.yres;
+ pipe->dst_w = fbi->var.xres;
pipe->srcp0_ystride = fbi->fix.line_length;
ret = mdp4_overlay_format2pipe(pipe);
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 2da2052..2d5025b 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -503,6 +503,8 @@
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
+ pipe->dst_h = fbi->var.yres;
+ pipe->dst_w = fbi->var.xres;
if (mfd->display_iova)
pipe->srcp0_addr = mfd->display_iova + buf_offset;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 72e3600..b6f4874 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1483,7 +1483,10 @@
ret = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
- if (hdmi_prim_display || mfd->panel_info.type != DTV_PANEL) {
+
+ if (hdmi_prim_display ||
+ (mfd->panel_info.type != DTV_PANEL &&
+ mfd->panel_info.type != WRITEBACK_PANEL)) {
mfd->early_suspend.suspend = msmfb_early_suspend;
mfd->early_suspend.resume = msmfb_early_resume;
mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 22eaf4f..72fe2e3 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -1302,6 +1302,7 @@
ddl_process_decoder_metadata(ddl);
vidc_sm_get_aspect_ratio_info(
&ddl->shared_mem[ddl->command_channel],
+ decoder->codec.codec,
&output_vcd_frm->aspect_ratio_info);
ddl_context->ddl_callback(VCD_EVT_RESP_OUTPUT_DONE,
vcd_status, output_frame,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
index 839a9c1..d45de2d 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -205,6 +205,10 @@
#define VIDC_SM_ASPECT_RATIO_INFO_ADDR 0x00c8
#define VIDC_SM_MPEG4_ASPECT_RATIO_INFO_BMSK 0xf
#define VIDC_SM_MPEG4_ASPECT_RATIO_INFO_SHFT 0x0
+#define VIDC_SM_MPEG2_ASPECT_RATIO_INFO_BMSK 0x000f0000
+#define VIDC_SM_MPEG2_ASPECT_RATIO_INFO_SHFT 16
+#define VIDC_SM_H264_ASPECT_RATIO_INFO_BMSK 0x00000ff0
+#define VIDC_SM_H264_ASPECT_RATIO_INFO_SHFT 4
#define VIDC_SM_EXTENDED_PAR_ADDR 0x00cc
#define VIDC_SM_EXTENDED_PAR_WIDTH_BMSK 0xffff0000
#define VIDC_SM_EXTENDED_PAR_WIDTH_SHFT 16
@@ -802,23 +806,160 @@
}
void vidc_sm_get_aspect_ratio_info(struct ddl_buf_addr *shared_mem,
- struct vcd_aspect_ratio *aspect_ratio_info)
+ enum vcd_codec codec, struct vcd_aspect_ratio *aspect_ratio_info)
{
- u32 extended_par_info = 0;
- aspect_ratio_info->aspect_ratio = DDL_MEM_READ_32(shared_mem,
+ u32 extended_par_info = 0, aspect_ratio = 0;
+
+ aspect_ratio = DDL_MEM_READ_32(shared_mem,
VIDC_SM_ASPECT_RATIO_INFO_ADDR);
- if (aspect_ratio_info->aspect_ratio == 0x0f) {
- extended_par_info = DDL_MEM_READ_32(shared_mem,
- VIDC_SM_EXTENDED_PAR_ADDR);
- aspect_ratio_info->extended_par_width =
- VIDC_GETFIELD(extended_par_info,
- VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
- VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
- aspect_ratio_info->extended_par_height =
- VIDC_GETFIELD(extended_par_info,
- VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
- VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+ if (codec == VCD_CODEC_H264) {
+ aspect_ratio_info->aspect_ratio =
+ VIDC_GETFIELD(aspect_ratio,
+ VIDC_SM_H264_ASPECT_RATIO_INFO_BMSK,
+ VIDC_SM_H264_ASPECT_RATIO_INFO_SHFT);
+
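+		/* Map H.264 aspect_ratio_idc to PAR; 255 means extended PAR */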
+ switch (aspect_ratio_info->aspect_ratio) {
+ case 1:
+ aspect_ratio_info->par_width = 1;
+ aspect_ratio_info->par_height = 1;
+ break;
+ case 2:
+ aspect_ratio_info->par_width = 12;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 3:
+ aspect_ratio_info->par_width = 10;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 4:
+ aspect_ratio_info->par_width = 16;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 5:
+ aspect_ratio_info->par_width = 40;
+ aspect_ratio_info->par_height = 33;
+ break;
+ case 6:
+ aspect_ratio_info->par_width = 24;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 7:
+ aspect_ratio_info->par_width = 20;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 8:
+ aspect_ratio_info->par_width = 32;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 9:
+ aspect_ratio_info->par_width = 80;
+ aspect_ratio_info->par_height = 33;
+ break;
+ case 10:
+ aspect_ratio_info->par_width = 18;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 11:
+ aspect_ratio_info->par_width = 15;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 12:
+ aspect_ratio_info->par_width = 64;
+ aspect_ratio_info->par_height = 33;
+ break;
+ case 13:
+ aspect_ratio_info->par_width = 160;
+ aspect_ratio_info->par_height = 99;
+ break;
+ case 14:
+ aspect_ratio_info->par_width = 4;
+ aspect_ratio_info->par_height = 3;
+ break;
+ case 15:
+ aspect_ratio_info->par_width = 3;
+ aspect_ratio_info->par_height = 2;
+ break;
+ case 16:
+ aspect_ratio_info->par_width = 2;
+ aspect_ratio_info->par_height = 1;
+ break;
+ case 255:
+ extended_par_info = DDL_MEM_READ_32(shared_mem,
+ VIDC_SM_EXTENDED_PAR_ADDR);
+ aspect_ratio_info->par_width =
+ VIDC_GETFIELD(extended_par_info,
+ VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
+ VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
+ aspect_ratio_info->par_height =
+ VIDC_GETFIELD(extended_par_info,
+ VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
+ VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+ break;
+ default:
+ DDL_MSG_ERROR("Incorrect Aspect Ratio.");
+ aspect_ratio_info->par_width = 1;
+ aspect_ratio_info->par_height = 1;
+ break;
+ }
+ } else if ((codec == VCD_CODEC_MPEG4) ||
+ (codec == VCD_CODEC_DIVX_4) ||
+ (codec == VCD_CODEC_DIVX_5) ||
+ (codec == VCD_CODEC_DIVX_6) ||
+ (codec == VCD_CODEC_XVID) ||
+ (codec == VCD_CODEC_MPEG2)) {
+
+ if (codec == VCD_CODEC_MPEG2) {
+ aspect_ratio_info->aspect_ratio =
+ VIDC_GETFIELD(aspect_ratio,
+ VIDC_SM_MPEG2_ASPECT_RATIO_INFO_BMSK,
+ VIDC_SM_MPEG2_ASPECT_RATIO_INFO_SHFT);
+ } else {
+ aspect_ratio_info->aspect_ratio =
+ VIDC_GETFIELD(aspect_ratio,
+ VIDC_SM_MPEG4_ASPECT_RATIO_INFO_BMSK,
+ VIDC_SM_MPEG4_ASPECT_RATIO_INFO_SHFT);
+ }
+
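+		/* MPEG-4/MPEG-2 codes: 1-5 map to fixed PARs, 15 is extended PAR */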
+ switch (aspect_ratio_info->aspect_ratio) {
+ case 1:
+ aspect_ratio_info->par_width = 1;
+ aspect_ratio_info->par_height = 1;
+ break;
+ case 2:
+ aspect_ratio_info->par_width = 12;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 3:
+ aspect_ratio_info->par_width = 10;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 4:
+ aspect_ratio_info->par_width = 16;
+ aspect_ratio_info->par_height = 11;
+ break;
+ case 5:
+ aspect_ratio_info->par_width = 40;
+ aspect_ratio_info->par_height = 33;
+ break;
+ case 15:
+ extended_par_info = DDL_MEM_READ_32(shared_mem,
+ VIDC_SM_EXTENDED_PAR_ADDR);
+ aspect_ratio_info->par_width =
+ VIDC_GETFIELD(extended_par_info,
+ VIDC_SM_EXTENDED_PAR_WIDTH_BMSK,
+ VIDC_SM_EXTENDED_PAR_WIDTH_SHFT);
+ aspect_ratio_info->par_height =
+ VIDC_GETFIELD(extended_par_info,
+ VIDC_SM_EXTENDED_PAR_HEIGHT_BMSK,
+ VIDC_SM_EXTENDED_PAR_HEIGHT_SHFT);
+ break;
+ default:
+ DDL_MSG_ERROR("Incorrect Aspect Ratio.");
+ aspect_ratio_info->par_width = 1;
+ aspect_ratio_info->par_height = 1;
+ break;
+ }
}
}
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
index 6cd75595..1a46c36 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -180,7 +180,7 @@
struct ddl_buf_addr *shared_mem,
enum vidc_sm_num_stuff_bytes_consume_info consume_info);
void vidc_sm_get_aspect_ratio_info(struct ddl_buf_addr *shared_mem,
- struct vcd_aspect_ratio *aspect_ratio_info);
+ enum vcd_codec codec, struct vcd_aspect_ratio *aspect_ratio_info);
void vidc_sm_set_encoder_slice_batch_int_ctrl(struct ddl_buf_addr *shared_mem,
u32 slice_batch_int_enable);
void vidc_sm_get_num_slices_comp(struct ddl_buf_addr *shared_mem,
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 634011b..c3803b1 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -334,9 +334,9 @@
output_frame->aspect_ratio_info.aspect_ratio =
vcd_frame_data->aspect_ratio_info.aspect_ratio;
output_frame->aspect_ratio_info.par_width =
- vcd_frame_data->aspect_ratio_info.extended_par_width;
+ vcd_frame_data->aspect_ratio_info.par_width;
output_frame->aspect_ratio_info.par_height =
- vcd_frame_data->aspect_ratio_info.extended_par_height;
+ vcd_frame_data->aspect_ratio_info.par_height;
vdec_msg->vdec_msg_info.msgdatasize =
sizeof(struct vdec_output_frameinfo);
} else {
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 3b1d06d..b9ecd60 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -376,6 +376,7 @@
struct msm_usb_host_platform_data {
unsigned int power_budget;
+ int pmic_gpio_dp_irq;
unsigned int dock_connect_irq;
};
@@ -389,21 +390,46 @@
bool core_clk_always_on_workaround;
};
+/**
+ * struct usb_bam_pipe_connect: pipe connection information
+ * between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
+ * either src BAM or dst BAM
+ * @src_phy_addr: src bam physical address.
+ * @src_pipe_index: src bam pipe index.
+ * @dst_phy_addr: dst bam physical address.
+ * @dst_pipe_index: dst bam pipe index.
+ * @data_fifo_base_offset: data fifo offset.
+ * @data_fifo_size: data fifo size.
+ * @desc_fifo_base_offset: descriptor fifo offset.
+ * @desc_fifo_size: descriptor fifo size.
+ */
struct usb_bam_pipe_connect {
u32 src_phy_addr;
- int src_pipe_index;
+ u32 src_pipe_index;
u32 dst_phy_addr;
- int dst_pipe_index;
+ u32 dst_pipe_index;
u32 data_fifo_base_offset;
u32 data_fifo_size;
u32 desc_fifo_base_offset;
u32 desc_fifo_size;
};
+/**
+ * struct msm_usb_bam_platform_data: platform data for the USB BAM,
+ * describing the available pipe connections and which BAM (USB or
+ * HSIC) is currently active.
+ * @connections: holds all pipe connections data.
+ * @usb_active_bam: set USB or HSIC as the active BAM.
+ * @usb_bam_num_pipes: max number of pipes to use.
+ * @total_bam_num: total number of BAMs that are supported.
+ * @usb_base_address: BAM physical address.
+ */
struct msm_usb_bam_platform_data {
struct usb_bam_pipe_connect *connections;
int usb_active_bam;
int usb_bam_num_pipes;
+ u32 total_bam_num;
+ u32 usb_base_address;
};
enum usb_bam {
@@ -411,8 +437,27 @@
HSIC_BAM,
};
+#ifdef CONFIG_USB_DWC3_MSM
int msm_ep_config(struct usb_ep *ep);
int msm_ep_unconfig(struct usb_ep *ep);
-int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size);
+int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size,
+ u8 dst_pipe_idx);
+#else
+static inline int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size,
+ u8 dst_pipe_idx)
+{
+ return -ENODEV;
+}
+
+static inline int msm_ep_config(struct usb_ep *ep)
+{
+ return -ENODEV;
+}
+
+static inline int msm_ep_unconfig(struct usb_ep *ep)
+{
+ return -ENODEV;
+}
+#endif
#endif
diff --git a/include/media/msm/vcd_api.h b/include/media/msm/vcd_api.h
index c93b696..7104028 100644
--- a/include/media/msm/vcd_api.h
+++ b/include/media/msm/vcd_api.h
@@ -55,8 +55,8 @@
struct vcd_aspect_ratio {
u32 aspect_ratio;
- u32 extended_par_width;
- u32 extended_par_height;
+ u32 par_width;
+ u32 par_height;
};
struct vcd_frame_data {
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 588fd07..11f7153 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -506,8 +506,7 @@
#define AXI_CMD_RECORD BIT(2)
#define AXI_CMD_ZSL BIT(3)
#define AXI_CMD_RAW_CAPTURE BIT(4)
-
-
+#define AXI_CMD_LIVESHOT BIT(5)
/* vfe config command: config command(from config thread)*/
struct msm_vfe_cfg_cmd {
@@ -1765,6 +1764,7 @@
uint32_t capture_count;
uint32_t skip_abort;
uint16_t port_info;
+ uint32_t inst_handle;
uint16_t cmd_type;
};
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index f8dbed9..9fa5932 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -220,6 +220,19 @@
#define VFE_CMD_STATS_BHIST_START 147
#define VFE_CMD_STATS_BHIST_STOP 148
#define VFE_CMD_RESET_2 149
+#define VFE_CMD_FOV_ENC_CFG 150
+#define VFE_CMD_FOV_VIEW_CFG 151
+#define VFE_CMD_FOV_ENC_UPDATE 152
+#define VFE_CMD_FOV_VIEW_UPDATE 153
+#define VFE_CMD_SCALER_ENC_CFG 154
+#define VFE_CMD_SCALER_VIEW_CFG 155
+#define VFE_CMD_SCALER_ENC_UPDATE 156
+#define VFE_CMD_SCALER_VIEW_UPDATE 157
+#define VFE_CMD_COLORXFORM_ENC_CFG 158
+#define VFE_CMD_COLORXFORM_VIEW_CFG 159
+#define VFE_CMD_COLORXFORM_ENC_UPDATE 160
+#define VFE_CMD_COLORXFORM_VIEW_UPDATE 161
+#define VFE_CMD_TEST_GEN_CFG 162
struct msm_isp_cmd {
int32_t id;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8129d97..5e5ad91 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/netdevice.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -176,6 +177,7 @@
struct prio_sched_data *q = qdisc_priv(sch);
struct tc_prio_qopt *qopt;
int i;
+ int flow_change = 0;
if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
@@ -190,7 +192,10 @@
}
sch_tree_lock(sch);
- q->enable_flow = qopt->enable_flow;
+ if (q->enable_flow != qopt->enable_flow) {
+ q->enable_flow = qopt->enable_flow;
+ flow_change = 1;
+ }
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
@@ -225,6 +230,13 @@
}
}
}
+
+ /* Schedule qdisc when flow re-enabled */
+ if (flow_change && q->enable_flow) {
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &sch->state))
+ __netif_schedule(qdisc_root(sch));
+ }
return 0;
}