Merge "mdss: mdp3: validate histogram data passed in"
diff --git a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
index b93dc4d..7ca741c 100644
--- a/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ehci-hsic.txt
@@ -126,6 +126,7 @@
Required properties :
- compatible : should be "qcom,hsic-smsc-hub"
+- smsc,model-id : should be either <3503> or <4604> depending on hub model
- smsc,<gpio-name>-gpio : handle to the GPIO node, see "gpios property"
in Documentation/devicetree/bindings/gpio/gpio.txt.
Required "gpio-name" is "reset" and optionally - "refclk", "int".
@@ -137,6 +138,7 @@
Example SMSC HSIC HUB :
hsic_hub {
compatible = "qcom,hsic-smsc-hub";
+ smsc,model-id = <4604>;
ranges;
smsc,reset-gpio = <&pm8941_gpios 8 0x00>;
smsc,refclk-gpio = <&pm8941_gpios 16 0x00>;
diff --git a/arch/arm/boot/dts/apq8074-dragonboard.dtsi b/arch/arm/boot/dts/apq8074-dragonboard.dtsi
index acd0bb9..46f5f22 100644
--- a/arch/arm/boot/dts/apq8074-dragonboard.dtsi
+++ b/arch/arm/boot/dts/apq8074-dragonboard.dtsi
@@ -62,6 +62,7 @@
hsic_hub {
compatible = "qcom,hsic-smsc-hub";
+ smsc,model-id = <4604>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -604,7 +605,7 @@
};
qcom,dc-chgpth@1400 {
- status = "ok";
+ status = "disabled";
};
qcom,boost@1500 {
diff --git a/arch/arm/boot/dts/msm8610-coresight.dtsi b/arch/arm/boot/dts/msm8610-coresight.dtsi
index 2041bf6..98a99a7 100644
--- a/arch/arm/boot/dts/msm8610-coresight.dtsi
+++ b/arch/arm/boot/dts/msm8610-coresight.dtsi
@@ -16,6 +16,8 @@
reg = <0xfc326000 0x1000>,
<0xfc37c000 0x3000>;
reg-names = "tmc-base", "bam-base";
+ interrupts = <0 166 0>;
+ interrupt-names = "byte-cntr-irq";
qcom,memory-reservation-type = "EBI1";
qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
diff --git a/drivers/gpu/msm/adreno_trace.c b/arch/arm/boot/dts/msm8926-qrd-skug.dts
similarity index 69%
rename from drivers/gpu/msm/adreno_trace.c
rename to arch/arm/boot/dts/msm8926-qrd-skug.dts
index 607ba8c..557e0c8 100644
--- a/drivers/gpu/msm/adreno_trace.c
+++ b/arch/arm/boot/dts/msm8926-qrd-skug.dts
@@ -8,11 +8,15 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
-#include "adreno.h"
+/dts-v1/;
+/include/ "msm8926.dtsi"
+/include/ "msm8226-qrd.dtsi"
-/* Instantiate tracepoints */
-#define CREATE_TRACE_POINTS
-#include "adreno_trace.h"
+/ {
+ model = "Qualcomm MSM 8926 QRD SKUG";
+ compatible = "qcom,msm8926-qrd", "qcom,msm8926", "qcom,qrd";
+ qcom,board-id = <11 5>;
+ qcom,msm-id = <200 0>;
+};
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index f90599a..1803f91 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -375,6 +375,7 @@
hsic_hub {
compatible = "qcom,hsic-smsc-hub";
+ smsc,model-id = <3503>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index b9d37b8..b8dc0d7b 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -69,10 +69,9 @@
CONFIG_MSM_OCMEM_NONSECURE=y
CONFIG_MSM_OCMEM_POWER_DISABLE=y
CONFIG_SENSORS_ADSP=y
-CONFIG_MSM_RTB=y
-CONFIG_MSM_RTB_SEPARATE_CPUS=y
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index a5f0704..dd4274a 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -1,6 +1,5 @@
# CONFIG_ARM_PATCH_PHYS_VIRT is not set
CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
@@ -71,6 +70,7 @@
CONFIG_MSM_RTB_SEPARATE_CPUS=y
CONFIG_MSM_ENABLE_WDOG_DEBUG_CONTROL=y
CONFIG_MSM_BOOT_STATS=y
+CONFIG_STRICT_MEMORY_RWX=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 9296515..01c7a2a 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -109,6 +109,7 @@
dtb-$(CONFIG_ARCH_MSM8226) += msm8926-cdp.dtb
dtb-$(CONFIG_ARCH_MSM8226) += msm8926-mtp.dtb
dtb-$(CONFIG_ARCH_MSM8226) += msm8926-qrd.dtb
+ dtb-$(CONFIG_ARCH_MSM8226) += msm8926-qrd-skug.dtb
dtb-$(CONFIG_ARCH_MSM8226) += msm8226-v1-qrd-skuf.dtb
dtb-$(CONFIG_ARCH_MSM8226) += msm8226-v2-qrd-skuf.dtb
dtb-$(CONFIG_ARCH_MSM8226) += apq8026-v1-xpm.dtb
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 37567ed..f234712 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -75,7 +75,7 @@
#include <linux/mfd/wcd9xxx/pdata.h>
#endif
-#include <linux/smsc3503.h>
+#include <linux/smsc_hub.h>
#include <linux/msm_ion.h>
#include <mach/ion.h>
#include <mach/mdm2.h>
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 6cb04c7..9479492 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -75,6 +75,7 @@
PLATFORM_SUBTYPE_SKUAA = 0x1,
PLATFORM_SUBTYPE_SKUF = 0x2,
PLATFORM_SUBTYPE_SKUAB = 0x3,
+ PLATFORM_SUBTYPE_SKUG = 0x5,
PLATFORM_SUBTYPE_QRD_INVALID,
};
@@ -83,6 +84,7 @@
[PLATFORM_SUBTYPE_SKUAA] = "SKUAA",
[PLATFORM_SUBTYPE_SKUF] = "SKUF",
[PLATFORM_SUBTYPE_SKUAB] = "SKUAB",
+ [PLATFORM_SUBTYPE_SKUG] = "SKUG",
};
enum {
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index a779b24..1deee5c 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -459,6 +459,7 @@
if ((read_len + 9) >= USER_SPACE_DATA) {
pr_err("diag: dci: Invalid length while forming dci pkt in %s",
__func__);
+ mutex_unlock(&driver->dci_mutex);
return -EIO;
}
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
index c501700..d500c0a 100644
--- a/drivers/coresight/coresight-tmc.c
+++ b/drivers/coresight/coresight-tmc.c
@@ -1451,14 +1451,17 @@
if (!drvdata->byte_cntr_present) {
dev_info(&pdev->dev, "Byte Counter feature absent\n");
- return 0;
+ goto out;
}
drvdata->byte_cntr_irq = platform_get_irq_byname(pdev,
"byte-cntr-irq");
if (drvdata->byte_cntr_irq < 0) {
+ /* Even though this is an error condition, we do not fail
+ * the probe as the byte counter feature is optional
+ */
dev_err(&pdev->dev, "Byte-cntr-irq not specified\n");
- return 0;
+ goto err;
}
ret = devm_request_irq(&pdev->dev, drvdata->byte_cntr_irq,
tmc_etr_byte_cntr_irq,
@@ -1466,7 +1469,7 @@
node_name, drvdata);
if (ret) {
dev_err(&pdev->dev, "Request irq failed\n");
- return ret;
+ goto err;
}
init_waitqueue_head(&drvdata->wq);
node_size += strlen(node_name);
@@ -1477,10 +1480,14 @@
ret = tmc_etr_byte_cntr_dev_register(drvdata);
if (ret) {
dev_err(&pdev->dev, "Byte cntr node not registered\n");
- return ret;
+ goto err;
}
dev_info(&pdev->dev, "Byte Counter feature enabled\n");
return 0;
+err:
+ drvdata->byte_cntr_present = false;
+out:
+ return ret;
}
static void tmc_etr_byte_cntr_exit(struct tmc_drvdata *drvdata)
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index aac183b..118e033 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -22,11 +22,9 @@
msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
- adreno_dispatch.o \
adreno_postmortem.o \
adreno_snapshot.o \
adreno_coresight.o \
- adreno_trace.o \
adreno_a2xx.o \
adreno_a2xx_trace.o \
adreno_a2xx_snapshot.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 8875d74..b964620 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -32,7 +32,6 @@
#include "adreno.h"
#include "adreno_pm4types.h"
-#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
@@ -114,6 +113,18 @@
.long_ib_detect = 1,
};
+/* This set of registers is used for hang detection.
+ * If the values of these registers remain the same after
+ * KGSL_TIMEOUT_PART time, a GPU hang is reported in the
+ * kernel log.
+ * *****ALERT******ALERT********ALERT*************
+ * The order of the registers below is important: registers
+ * from LONG_IB_DETECT_REG_INDEX_START to
+ * LONG_IB_DETECT_REG_INDEX_END are used in long IB detection.
+ */
+#define LONG_IB_DETECT_REG_INDEX_START 1
+#define LONG_IB_DETECT_REG_INDEX_END 5
+
unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
/*
@@ -202,6 +213,8 @@
512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
};
+static unsigned int adreno_isidle(struct kgsl_device *device);
+
/**
* adreno_perfcounter_init: Reserve kernel performance counters
* @device: device to configure
@@ -585,9 +598,23 @@
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
+ irqreturn_t result;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- return adreno_dev->gpudev->irq_handler(adreno_dev);
+ result = adreno_dev->gpudev->irq_handler(adreno_dev);
+
+ device->pwrctrl.irq_last = 1;
+ if (device->requested_state == KGSL_STATE_NONE) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+
+ /* Reset the time-out in our idle timer */
+ mod_timer_pending(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+ mod_timer_pending(&device->hang_timer,
+ (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
+ return result;
}
static void adreno_cleanup_pt(struct kgsl_device *device,
@@ -894,7 +921,7 @@
adreno_dev->dev.cff_dump_enable);
}
-static int adreno_iommu_setstate(struct kgsl_device *device,
+static void adreno_iommu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -907,24 +934,22 @@
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
- unsigned int result;
if (adreno_use_default_setstate(adreno_dev)) {
kgsl_mmu_device_setstate(&device->mmu, flags);
- return 0;
+ return;
}
num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
context = kgsl_context_get(device, context_id);
if (context == NULL)
- return -EINVAL;
+ return;
adreno_ctx = ADRENO_CONTEXT(context);
- result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
-
- if (result)
- goto done;
+ if (kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER))
+ return;
pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
@@ -958,24 +983,14 @@
* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement
*/
- result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
- KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
+ adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
- /*
- * On error disable the IOMMU clock right away otherwise turn it off
- * after the command has been retired
- */
- if (result)
- kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
- else
- kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
-
-done:
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts, true);
kgsl_context_put(context);
- return result;
}
-static int adreno_gpummu_setstate(struct kgsl_device *device,
+static void adreno_gpummu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -986,7 +1001,6 @@
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
- int ret = 0;
/*
* Fix target freeze issue by adding TLB flush for each submit
@@ -1003,8 +1017,7 @@
if (!adreno_use_default_setstate(adreno_dev)) {
context = kgsl_context_get(device, context_id);
if (context == NULL)
- return -EINVAL;
-
+ return;
adreno_ctx = ADRENO_CONTEXT(context);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
@@ -1079,7 +1092,7 @@
sizedwords += 2;
}
- ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ adreno_ringbuffer_issuecmds(device, adreno_ctx,
KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
@@ -1087,11 +1100,9 @@
} else {
kgsl_mmu_device_setstate(&device->mmu, flags);
}
-
- return ret;
}
-static int adreno_setstate(struct kgsl_device *device,
+static void adreno_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
@@ -1100,8 +1111,6 @@
return adreno_gpummu_setstate(device, context_id, flags);
else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
return adreno_iommu_setstate(device, context_id, flags);
-
- return 0;
}
static unsigned int
@@ -1577,10 +1586,6 @@
if (status)
goto error_close_rb;
- status = adreno_dispatcher_init(adreno_dev);
- if (status)
- goto error_close_device;
-
adreno_debugfs_init(device);
adreno_profile_init(device);
@@ -1596,8 +1601,6 @@
return 0;
-error_close_device:
- kgsl_device_platform_remove(device);
error_close_rb:
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
error:
@@ -1620,7 +1623,6 @@
kgsl_pwrscale_detach_policy(device);
kgsl_pwrscale_close(device);
- adreno_dispatcher_close(adreno_dev);
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
kgsl_device_platform_remove(device);
@@ -1632,7 +1634,8 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ if (KGSL_STATE_DUMP_AND_FT != device->state)
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/* Power up the device */
kgsl_pwrctrl_enable(device);
@@ -1702,15 +1705,13 @@
kgsl_cffdump_open(device);
- kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ if (KGSL_STATE_DUMP_AND_FT != device->state)
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
(device->pwrctrl.gpu_cx &&
regulator_is_enabled(device->pwrctrl.gpu_cx)));
- /* Clear any GPU faults that might have been left over */
- adreno_set_gpu_fault(adreno_dev, 0);
-
/* Power up the device */
kgsl_pwrctrl_enable(device);
@@ -1756,10 +1757,10 @@
if (status)
goto error_irq_off;
- adreno_perfcounter_start(adreno_dev);
+ mod_timer(&device->hang_timer,
+ (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
- /* Start the dispatcher */
- adreno_dispatcher_start(adreno_dev);
+ adreno_perfcounter_start(adreno_dev);
device->reset_counter++;
@@ -1790,7 +1791,6 @@
adreno_dev->drawctxt_active = NULL;
- adreno_dispatcher_stop(adreno_dev);
adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
kgsl_mmu_stop(&device->mmu);
@@ -1798,6 +1798,7 @@
device->ftbl->irqctrl(device, 0);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
del_timer_sync(&device->idle_timer);
+ del_timer_sync(&device->hang_timer);
adreno_ocmem_gmem_free(adreno_dev);
@@ -1809,48 +1810,918 @@
return 0;
}
-/**
- * adreno_reset() - Helper function to reset the GPU
- * @device: Pointer to the KGSL device structure for the GPU
- *
- * Try to reset the GPU to recover from a fault. First, try to do a low latency
- * soft reset. If the soft reset fails for some reason, then bring out the big
- * guns and toggle the footswitch.
+/*
+ * Set the reset status of all contexts to
+ * INNOCENT_CONTEXT_RESET_EXT except for the bad context,
+ * since that's the guilty party; if fault tolerance failed,
+ * mark all contexts as guilty
*/
-int adreno_reset(struct kgsl_device *device)
+
+static int _mark_context_status(int id, void *ptr, void *data)
+{
+ unsigned int ft_status = *((unsigned int *) data);
+ struct kgsl_context *context = ptr;
+ struct adreno_context *adreno_context = ADRENO_CONTEXT(context);
+
+ if (ft_status) {
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
+ context->reset_status) {
+ if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
+ CTXT_FLAGS_GPU_HANG_FT))
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ else
+ context->reset_status =
+ KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
+ }
+
+ return 0;
+}
+
+static void adreno_mark_context_status(struct kgsl_device *device,
+ int ft_status)
+{
+ /* Mark the status for all the contexts in the device */
+
+ read_lock(&device->context_lock);
+ idr_for_each(&device->context_idr, _mark_context_status, &ft_status);
+ read_unlock(&device->context_lock);
+}
+
+/*
+ * For hung contexts set the current memstore value to the most recently
+ * issued timestamp - this resets the status and lets the system continue on
+ */
+
+static int _set_max_ts(int id, void *ptr, void *data)
+{
+ struct kgsl_device *device = data;
+ struct kgsl_context *context = ptr;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+
+ if (drawctxt && drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ soptimestamp), drawctxt->timestamp);
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ eoptimestamp), drawctxt->timestamp);
+ }
+
+ return 0;
+}
+
+static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
+{
+ read_lock(&device->context_lock);
+ idr_for_each(&device->context_idr, _set_max_ts, device);
+ read_unlock(&device->context_lock);
+}
+
+static void adreno_destroy_ft_data(struct adreno_ft_data *ft_data)
+{
+ vfree(ft_data->rb_buffer);
+ vfree(ft_data->bad_rb_buffer);
+ vfree(ft_data->good_rb_buffer);
+}
+
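+/*
+ * Walk the ringbuffer from *ptr (forwards if inc is true, else backwards)
+ * until a KGSL_CMD_IDENTIFIER is found, then back *ptr up to the start of
+ * that command sequence. Returns 0 on success, -EINVAL if the write
+ * pointer is reached first.
+ */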
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+ unsigned int *ptr,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int val1;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int start_ptr = *ptr;
+
+ while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+ if (inc)
+ start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+ size);
+ else
+ start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+ size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+ /* Ensure above read is finished before next read */
+ rmb();
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+ start_ptr = adreno_ringbuffer_dec_wrapped(
+ start_ptr, size);
+ *ptr = start_ptr;
+ status = 0;
+ break;
+ }
+ }
+ return status;
+}
+
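+/*
+ * Locate the memory-write packet that stored 'global_eop' to the global
+ * eoptimestamp address and rewind to the start of the command sequence
+ * that follows it. On success *rb_rptr is updated with that offset.
+ */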
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int global_eop,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[3];
+ int i = 0;
+ bool check = false;
+
+ if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+ return status;
+
+ do {
+ /*
+ * when decrementing we need to decrement first and
+ * then read to make sure we cover all the data
+ */
+ if (!inc)
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+ temp_rb_rptr);
+ /* Ensure above read is finished before next read */
+ rmb();
+
+ if (check && ((inc && val[i] == global_eop) ||
+ (!inc && (val[i] ==
+ cp_type3_packet(CP_MEM_WRITE, 2) ||
+ val[i] == CACHE_FLUSH_TS)))) {
+ /* decrement i, i.e. i = (i - 1 + 3) % 3 if
+ * we are going forward, else increment i */
+ i = (i + 2) % 3;
+ if (val[i] == rb->device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)) {
+ int j = ((i + 2) % 3);
+ if ((inc && (val[j] == CACHE_FLUSH_TS ||
+ val[j] == cp_type3_packet(
+ CP_MEM_WRITE, 2))) ||
+ (!inc && val[j] == global_eop)) {
+ /* Found the global eop */
+ status = 0;
+ break;
+ }
+ }
+ /* if no match found then increment i again
+ * since we decremented before matching */
+ i = (i + 1) % 3;
+ }
+ if (inc)
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+ temp_rb_rptr, size);
+
+ i = (i + 1) % 3;
+ if (2 == i)
+ check = true;
+ } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+ /* temp_rb_rptr points to the command stream after global eop,
+ * move backward till the start of command sequence */
+ if (!status) {
+ status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+ if (!status) {
+ *rb_rptr = temp_rb_rptr;
+ KGSL_FT_INFO(rb->device,
+ "Offset of cmd sequence after eop timestamp: 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ }
+ }
+ if (status)
+ KGSL_FT_ERR(rb->device,
+ "Failed to find the command sequence after eop timestamp %x\n",
+ global_eop);
+ return status;
+}
+
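+/*
+ * Scan forward from *rb_rptr for the indirect buffer command that issued
+ * 'ib1' (the hanging IB) and move *rb_rptr back to the start of that
+ * command sequence. The search is abandoned if a second context switch
+ * is seen before the IB is found.
+ */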
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int ib1)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool ctx_switch = false;
+
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+ /* Ensure above read is finished before next read */
+ rmb();
+
+ if (check && val[i] == ib1) {
+ /* decrement i, i.e. i = (i - 1 + 2) % 2 */
+ i = (i + 1) % 2;
+ if (adreno_cmd_is_ib(val[i])) {
+ /* go till start of command sequence */
+ status = _find_start_of_cmd_seq(rb,
+ &temp_rb_rptr, false);
+
+ KGSL_FT_INFO(rb->device,
+ "Found the hanging IB at offset 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ break;
+ }
+ /* if no match then increment i since we decremented
+ * before checking */
+ i = (i + 1) % 2;
+ }
+ /* Make sure you do not encounter a context switch twice; we can
+ * encounter it once for the bad context as the start of search
+ * can point to the context switch */
+ if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ if (ctx_switch) {
+ KGSL_FT_ERR(rb->device,
+ "Context switch encountered before bad "
+ "IB found\n");
+ break;
+ }
+ ctx_switch = true;
+ }
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+ if (!status)
+ *rb_rptr = temp_rb_rptr;
+ return status;
+}
+
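+/*
+ * Gather everything fault tolerance needs at hang time: the current IB1
+ * base, the hung context id, the last retired global timestamp, scratch
+ * buffers for the extracted ringbuffer contents, the FT policy to apply
+ * and the offset of the first command sequence to replay.
+ */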
+static void adreno_setup_ft_data(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
{
int ret = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context;
+ unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
- /* Try soft reset first */
- if (adreno_soft_reset(device) != 0) {
+ memset(ft_data, 0, sizeof(*ft_data));
+ ft_data->start_of_replay_cmds = 0xFFFFFFFF;
+ ft_data->replay_for_snapshot = 0xFFFFFFFF;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ft_data->ib1);
+
+ kgsl_sharedmem_readl(&device->memstore, &ft_data->context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+
+ kgsl_sharedmem_readl(&device->memstore,
+ &ft_data->global_eop,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+
+ /* Ensure context id and global eop ts read complete */
+ rmb();
+
+ ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!ft_data->rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return;
+ }
+
+ ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!ft_data->bad_rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return;
+ }
+
+ ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!ft_data->good_rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return;
+ }
+ ft_data->status = 0;
+
+ /* find the start of bad command sequence in rb */
+ context = kgsl_context_get(device, ft_data->context_id);
+
+ ft_data->ft_policy = adreno_dev->ft_policy;
+
+ if (!ft_data->ft_policy)
+ ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
+
+ /* Look for the command stream that is right after the global eop */
+ ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+ ft_data->global_eop + 1, false);
+ if (ret) {
+ ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
+ goto done;
+ } else {
+ ft_data->start_of_replay_cmds = rb_rptr;
+ ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
+ }
+
+ if (context) {
+ adreno_context = ADRENO_CONTEXT(context);
+ if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+ if (ft_data->ib1) {
+ ret = _find_hanging_ib_sequence(rb,
+ &rb_rptr, ft_data->ib1);
+ if (ret) {
+ KGSL_FT_ERR(device,
+ "Start not found for replay IB seq\n");
+ goto done;
+ }
+ ft_data->start_of_replay_cmds = rb_rptr;
+ ft_data->replay_for_snapshot = rb_rptr;
+ }
+ }
+ }
+
+done:
+ kgsl_context_put(context);
+}
+
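+/*
+ * Confirm a long IB detection: returns 1 if the global eop timestamp has
+ * not moved since the long IB was flagged, 0 if the GPU has made progress
+ * in the meantime (false detection).
+ */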
+static int
+_adreno_check_long_ib(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int curr_global_ts = 0;
+
+ /* check if the global ts is still the same */
+ kgsl_sharedmem_readl(&device->memstore,
+ &curr_global_ts,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+ /* Ensure above read is finished before long ib check */
+ rmb();
+
+ /* Mark long ib as handled */
+ adreno_dev->long_ib = 0;
+
+ if (curr_global_ts == adreno_dev->long_ib_ts) {
+ KGSL_FT_ERR(device,
+ "IB ran too long, invalidate ctxt\n");
+ return 1;
+ } else {
+ /* Do nothing, the GPU has gone ahead */
+ KGSL_FT_INFO(device, "false long ib detection return\n");
+ return 0;
+ }
+}
+
+/**
+ * adreno_soft_reset() - Do a soft reset of the GPU hardware
+ * @device: KGSL device to soft reset
+ *
+ * "soft reset" the GPU hardware - this is a fast path GPU reset
+ * The GPU hardware is reset but we never pull power so we can skip
+ * a lot of the standard adreno_stop/adreno_start sequence
+ */
+int adreno_soft_reset(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret;
+
+ /* If the jump table index is 0 soft reset is not supported */
+ if ((!adreno_dev->pm4_jt_idx) || (!adreno_dev->gpudev->soft_reset)) {
+ dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
+ return -EINVAL;
+ }
+
+ if (adreno_dev->drawctxt_active)
+ kgsl_context_put(&adreno_dev->drawctxt_active->base);
+
+ adreno_dev->drawctxt_active = NULL;
+
+ /* Stop the ringbuffer */
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+ /* Delete the idle timer */
+ del_timer_sync(&device->idle_timer);
+
+ /* Make sure we are totally awake */
+ kgsl_pwrctrl_enable(device);
+
+ /* Reset the GPU */
+ adreno_dev->gpudev->soft_reset(adreno_dev);
+
+ /* Reinitialize the GPU */
+ adreno_dev->gpudev->start(adreno_dev);
+
+ /* Enable IRQ */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ /*
+ * Restart the ringbuffer - we can go down the warm start path because
+ * power was never yanked
+ */
+ ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+ if (ret)
+ return ret;
+
+ device->reset_counter++;
+
+ return 0;
+}
+
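+/*
+ * Bring the GPU back after a hang: try a soft reset first and fall back
+ * to a full stop/init/start cycle. On success the context pagetable is
+ * restored and, on IOMMU targets, the IOMMU clocks are left enabled.
+ * Returns 0 on success and 1 on failure.
+ */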
+static int
+_adreno_ft_restart_device(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ /* If device soft reset fails try hard reset */
+ if (adreno_soft_reset(device))
KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+ else
+ /* Soft reset is successful */
+ goto reset_done;
- /* If it failed, then pull the power */
- ret = adreno_stop(device);
- if (ret)
- return ret;
+ /* restart device */
+ if (adreno_stop(device)) {
+ KGSL_FT_ERR(device, "Device stop failed\n");
+ return 1;
+ }
- ret = adreno_start(device);
+ if (adreno_init(device)) {
+ KGSL_FT_ERR(device, "Device init failed\n");
+ return 1;
+ }
- if (ret)
- return ret;
+ if (adreno_start(device)) {
+ KGSL_FT_ERR(device, "Device start failed\n");
+ return 1;
+ }
+
+reset_done:
+ if (context)
+ kgsl_mmu_setstate(&device->mmu, context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ /* If iommu is used then we need to make sure that the iommu clocks
+ * are on since there could be commands in the pipeline that touch the iommu */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ if (kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER))
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void
+_adreno_debug_ft_info(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
+{
+
+ /*
+ * Dumping rb is a very useful tool to debug FT.
+ * It will tell us if we are extracting the rb correctly,
+ * NOP'ing the right IB, skipping the EOF correctly, etc.
+ */
+ if (device->ft_log >= 7) {
+
+ /* Print fault tolerance data here */
+ KGSL_FT_INFO(device, "Temp RB buffer size 0x%X\n",
+ ft_data->rb_size);
+ adreno_dump_rb(device, ft_data->rb_buffer,
+ ft_data->rb_size<<2, 0, ft_data->rb_size);
+
+ KGSL_FT_INFO(device, "Bad RB buffer size 0x%X\n",
+ ft_data->bad_rb_size);
+ adreno_dump_rb(device, ft_data->bad_rb_buffer,
+ ft_data->bad_rb_size<<2, 0, ft_data->bad_rb_size);
+
+ KGSL_FT_INFO(device, "Good RB buffer size 0x%X\n",
+ ft_data->good_rb_size);
+ adreno_dump_rb(device, ft_data->good_rb_buffer,
+ ft_data->good_rb_size<<2, 0, ft_data->good_rb_size);
+
+ }
+}
+
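+/*
+ * Restart the device, retrying up to four times (BUG on persistent
+ * failure), then resubmit 'buff' to the ringbuffer and wait for the
+ * commands to retire.
+ */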
+static int
+_adreno_ft_resubmit_rb(struct kgsl_device *device,
+ struct adreno_ringbuffer *rb,
+ struct kgsl_context *context,
+ struct adreno_ft_data *ft_data,
+ unsigned int *buff, unsigned int size)
+{
+ unsigned int ret = 0;
+ unsigned int retry_num = 0;
+
+ _adreno_debug_ft_info(device, ft_data);
+
+ do {
+ ret = _adreno_ft_restart_device(device, context);
+ if (ret == 0)
+ break;
+ /*
+ * If device restart fails sleep for 20ms before
+ * attempting restart. This allows GPU HW to settle
+ * and improve the chances of next restart to be
+ * successful.
+ */
+ msleep(20);
+ KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
+ retry_num++;
+ } while (retry_num < 4);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "Device restart failed\n");
+ BUG_ON(1);
+ goto done;
+ }
+
+ if (size) {
+
+ /* submit commands and wait for them to pass */
+ adreno_ringbuffer_restore(rb, buff, size);
+
+ ret = adreno_idle(device);
+ }
+
+done:
+ return ret;
+}
+
+
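+/*
+ * Core fault tolerance: extract the ringbuffer contents, apply the FT
+ * policy to the hung context's commands (replay them, NOP the bad IB or
+ * skip the frame) and finally replay the good commands from the other
+ * contexts. Returns -EAGAIN if another hung context is found and fault
+ * tolerance should be run again.
+ */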
+static int
+_adreno_ft(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
+{
+ int ret = 0, i;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context = NULL;
+ struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+ unsigned int long_ib = 0;
+ static int no_context_ft;
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ context = kgsl_context_get(device, ft_data->context_id);
+
+ if (context == NULL) {
+ KGSL_FT_ERR(device, "Last context unknown id:%d\n",
+ ft_data->context_id);
+ if (no_context_ft) {
+ /*
+ * If 2 consecutive no-context FT attempts occurred then
+ * just reset the GPU
+ */
+ no_context_ft = 0;
+ goto play_good_cmds;
+ }
+ } else {
+ no_context_ft = 0;
+ adreno_context = ADRENO_CONTEXT(context);
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ /*
+ * set the invalid ts flag to 0 for this context since we have
+ * detected a hang for it
+ */
+ context->wait_on_invalid_ts = false;
+
+ if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+ ft_data->status = 1;
+ KGSL_FT_ERR(device, "Fault tolerance not supported\n");
+ goto play_good_cmds;
+ }
+
+ /*
+ * This flag will be set by userspace for contexts
+ * that do not want to be fault tolerant (e.g. OpenCL)
+ */
+ if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
+ ft_data->status = 1;
+ KGSL_FT_ERR(device,
+ "No FT set for this context play good cmds\n");
+ goto play_good_cmds;
+ }
+
+ }
+
+ /* Check if we detected a long running IB, if false return */
+ if ((adreno_context) && (adreno_dev->long_ib)) {
+ long_ib = _adreno_check_long_ib(device);
+ if (!long_ib) {
+ adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
+ return 0;
+ }
}
/*
- * If active_cnt is non-zero then the system was active before
- * going into a reset - put it back in that state
+ * Extract valid contents from rb which can still be executed after
+ * hang
*/
+ adreno_ringbuffer_extract(rb, ft_data);
- if (atomic_read(&device->active_cnt))
- kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ /* If long IB detected do not attempt replay of bad cmds */
+ if (long_ib) {
+ ft_data->status = 1;
+ _adreno_debug_ft_info(device, ft_data);
+ goto play_good_cmds;
+ }
- /* Set the page table back to the default page table */
- kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
- KGSL_MEMSTORE_GLOBAL);
+ if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
+ (ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
+ KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
+ ft_data->status = 1;
+ goto play_good_cmds;
+ }
+ /* Do not try to replay if hang is due to a pagefault */
+ if (context && test_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv)) {
+ /* Resume MMU */
+ mmu->mmu_ops->mmu_pagefault_resume(mmu);
+ if ((ft_data->context_id == context->id) &&
+ (ft_data->global_eop == context->pagefault_ts)) {
+ ft_data->ft_policy &= ~KGSL_FT_REPLAY;
+ KGSL_FT_ERR(device, "MMU fault skipping replay\n");
+ }
+ clear_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv);
+ }
+
+ if (ft_data->ft_policy & KGSL_FT_REPLAY) {
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "Replay status: 1\n");
+ ft_data->status = 1;
+ } else
+ goto play_good_cmds;
+ }
+
+ if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
+ for (i = 0; i < ft_data->bad_rb_size; i++) {
+ if ((ft_data->bad_rb_buffer[i] ==
+ CP_HDR_INDIRECT_BUFFER_PFD) &&
+ (ft_data->bad_rb_buffer[i+1] == ft_data->ib1)) {
+
+ ft_data->bad_rb_buffer[i] = cp_nop_packet(2);
+ ft_data->bad_rb_buffer[i+1] =
+ KGSL_NOP_IB_IDENTIFIER;
+ ft_data->bad_rb_buffer[i+2] =
+ KGSL_NOP_IB_IDENTIFIER;
+ break;
+ }
+ }
+
+ if ((i == (ft_data->bad_rb_size)) || (!ft_data->ib1)) {
+ KGSL_FT_ERR(device, "Bad IB to NOP not found\n");
+ ft_data->status = 1;
+ goto play_good_cmds;
+ }
+
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
+ ft_data->status = 1;
+ } else {
+ ft_data->status = 0;
+ goto play_good_cmds;
+ }
+ }
+
+ if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
+ for (i = 0; i < ft_data->bad_rb_size; i++) {
+ if (ft_data->bad_rb_buffer[i] ==
+ KGSL_END_OF_FRAME_IDENTIFIER) {
+ ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
+ break;
+ }
+ }
+
+ /* EOF not found in RB, discard till EOF in
+ next IB submission */
+ if (adreno_context && (i == ft_data->bad_rb_size)) {
+ adreno_context->flags |= CTXT_FLAGS_SKIP_EOF;
+ KGSL_FT_INFO(device,
+ "EOF not found in RB, skip next issueib till EOF\n");
+ ft_data->bad_rb_buffer[0] = cp_nop_packet(i);
+ }
+
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->bad_rb_buffer, ft_data->bad_rb_size);
+
+ if (ret) {
+ KGSL_FT_ERR(device, "Skip EOF status: 1\n");
+ ft_data->status = 1;
+ } else {
+ ft_data->status = 0;
+ goto play_good_cmds;
+ }
+ }
+
+play_good_cmds:
+
+ if (ft_data->status)
+ KGSL_FT_ERR(device, "Bad context commands failed\n");
+ else {
+ KGSL_FT_INFO(device, "Bad context commands success\n");
+
+ if (adreno_context) {
+ adreno_context->flags = (adreno_context->flags &
+ ~CTXT_FLAGS_GPU_HANG) | CTXT_FLAGS_GPU_HANG_FT;
+ }
+
+ if (last_active_ctx)
+ _kgsl_context_get(&last_active_ctx->base);
+
+ adreno_dev->drawctxt_active = last_active_ctx;
+ }
+
+ ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
+ ft_data->good_rb_buffer, ft_data->good_rb_size);
+
+ if (ret) {
+ /*
+ * If we fail here we can try to invalidate another
+ * context and try fault tolerance again, although
+ * we will only try ft with no context once to avoid
+ * going into a continuous loop of trying ft with no context
+ */
+ if (!context)
+ no_context_ft = 1;
+ ret = -EAGAIN;
+ KGSL_FT_ERR(device, "Playing good commands unsuccessful\n");
+ goto done;
+ } else
+ KGSL_FT_INFO(device, "Playing good commands successful\n");
+
+ /* ringbuffer now has data from the last valid context id,
+ * so restore the active_ctx to the last valid context */
+ if (ft_data->last_valid_ctx_id) {
+ struct kgsl_context *last_ctx = kgsl_context_get(device,
+ ft_data->last_valid_ctx_id);
+
+ adreno_dev->drawctxt_active = ADRENO_CONTEXT(last_ctx);
+ }
+
+done:
+ /* Turn off iommu clocks */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+
+ kgsl_context_put(context);
return ret;
}
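+/*
+ * Run _adreno_ft() repeatedly until it either succeeds or fails for a
+ * reason other than a second hung context, then restore the hardware
+ * pagetable and global timestamp state.
+ */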
+static int
+adreno_ft(struct kgsl_device *device,
+ struct adreno_ft_data *ft_data)
+{
+ int ret = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ /*
+ * If GPU FT is turned off do not run FT.
+ * If false GPU stall detection is suspected, this
+ * option can be used to confirm whether stalls are real.
+ */
+ if (ft_data->ft_policy & KGSL_FT_OFF) {
+ KGSL_FT_ERR(device, "GPU FT turned off\n");
+ return 0;
+ }
+
+ KGSL_FT_INFO(device,
+ "Start Parameters: IB1: 0x%X, "
+ "Bad context_id: %u, global_eop: 0x%x\n",
+ ft_data->ib1, ft_data->context_id, ft_data->global_eop);
+
+ KGSL_FT_INFO(device, "Last issued global timestamp: %x\n",
+ rb->global_ts);
+
+ /* We may need to replay commands multiple times based on whether
+ * multiple contexts hang the GPU */
+ while (true) {
+
+ ret = _adreno_ft(device, ft_data);
+
+ if (-EAGAIN == ret) {
+ /* setup new fault tolerance parameters and retry, this
+ * means more than one context is causing the hang */
+ adreno_destroy_ft_data(ft_data);
+ adreno_setup_ft_data(device, ft_data);
+ KGSL_FT_INFO(device,
+ "Retry. Parameters: "
+ "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
+ ft_data->ib1, ft_data->context_id,
+ ft_data->global_eop);
+ } else {
+ break;
+ }
+ }
+
+ if (ret)
+ goto done;
+
+ /* Restore correct states after fault tolerance */
+ if (adreno_dev->drawctxt_active)
+ device->mmu.hwpagetable =
+ adreno_dev->drawctxt_active->base.pagetable;
+ else
+ device->mmu.hwpagetable = device->mmu.defaultpagetable;
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp), rb->global_ts);
+
+ /* switch to NULL ctxt */
+ if (adreno_dev->drawctxt_active != NULL)
+ adreno_drawctxt_switch(adreno_dev, NULL, 0);
+
+done:
+ adreno_set_max_ts_for_bad_ctxs(device);
+ adreno_mark_context_status(device, ret);
+ KGSL_FT_ERR(device, "policy 0x%X status 0x%x\n",
+ ft_data->ft_policy, ret);
+ return ret;
+}
+
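+/*
+ * Entry point for hang recovery: serialize concurrent callers through the
+ * ft_gate completion, raise the GPU to its maximum power level, dump
+ * postmortem and snapshot state (unless a long IB is still executing) and
+ * run fault tolerance.
+ */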
+int
+adreno_dump_and_exec_ft(struct kgsl_device *device)
+{
+ int result = -ETIMEDOUT;
+ struct adreno_ft_data ft_data;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int curr_pwrlevel;
+
+ if (device->state == KGSL_STATE_HUNG)
+ goto done;
+ if (device->state == KGSL_STATE_DUMP_AND_FT) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->ft_gate);
+ mutex_lock(&device->mutex);
+ if (device->state != KGSL_STATE_HUNG)
+ result = 0;
+ } else {
+ /*
+ * While fault tolerance is happening we do not want the
+ * idle_timer to fire and attempt to change any device state
+ */
+ del_timer_sync(&device->idle_timer);
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_FT);
+ INIT_COMPLETION(device->ft_gate);
+ /* Detected a hang */
+
+ kgsl_cffdump_hang(device);
+ /* Run fault tolerance at max power level */
+ curr_pwrlevel = pwr->active_pwrlevel;
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
+
+ /* Get the fault tolerance data as soon as hang is detected */
+ adreno_setup_ft_data(device, &ft_data);
+
+ /*
+ * If a long ib is detected, do not attempt postmortem or
+ * snapshot; if the GPU is still executing commands
+ * we will get errors
+ */
+ if (!adreno_dev->long_ib) {
+ /*
+ * Trigger an automatic dump of the state to
+ * the console
+ */
+ kgsl_postmortem_dump(device, 0);
+
+ /*
+ * Make a GPU snapshot. For now, do it after the
+ * PM dump so we can at least be sure the PM dump
+ * will work as it always has
+ */
+ kgsl_device_snapshot(device, 1);
+ }
+
+ result = adreno_ft(device, &ft_data);
+ adreno_destroy_ft_data(&ft_data);
+
+ /* restore power level */
+ kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
+
+ if (result) {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
+ } else {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ mod_timer(&device->hang_timer,
+ (jiffies +
+ msecs_to_jiffies(KGSL_TIMEOUT_PART)));
+ }
+ complete_all(&device->ft_gate);
+ }
+done:
+ return result;
+}
+EXPORT_SYMBOL(adreno_dump_and_exec_ft);
+
/**
* _ft_sysfs_store() - Common routine to write to FT sysfs files
* @buf: value to write
@@ -2248,185 +3119,140 @@
return status;
}
-/**
- * adreno_hw_isidle() - Check if the GPU core is idle
- * @device: Pointer to the KGSL device structure for the GPU
- *
- * Return true if the RBBM status register for the GPU type indicates that the
- * hardware is idle
- */
-static bool adreno_hw_isidle(struct kgsl_device *device)
-{
- unsigned int reg_rbbm_status;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- /* Don't consider ourselves idle if there is an IRQ pending */
- if (adreno_dev->gpudev->irq_pending(adreno_dev))
- return false;
-
- adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
- &reg_rbbm_status);
-
- if (adreno_is_a2xx(adreno_dev)) {
- if (reg_rbbm_status == 0x110)
- return true;
- } else if (adreno_is_a3xx(adreno_dev)) {
- if (!(reg_rbbm_status & 0x80000000))
- return true;
- }
-
- return false;
-}
-
-/**
- * adreno_soft_reset() - Do a soft reset of the GPU hardware
- * @device: KGSL device to soft reset
- *
- * "soft reset" the GPU hardware - this is a fast path GPU reset
- * The GPU hardware is reset but we never pull power so we can skip
- * a lot of the standard adreno_stop/adreno_start sequence
- */
-int adreno_soft_reset(struct kgsl_device *device)
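+/*
+ * Poll until the ringbuffer read pointer catches up with the write
+ * pointer, running hang detection every KGSL_TIMEOUT_PART ms; returns
+ * -ETIMEDOUT if a hang is detected or ADRENO_IDLE_TIMEOUT expires.
+ */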
+static int adreno_ringbuffer_drain(struct kgsl_device *device,
+ unsigned int *regs)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int ret;
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned long wait = jiffies;
+ unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ unsigned int rptr;
- if (!adreno_dev->gpudev->soft_reset) {
- dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
- return -EINVAL;
- }
+ do {
+ /*
+ * Wait is "jiffies" first time in the loop to start
+ * GPU stall detection immediately.
+ */
+ if (time_after(jiffies, wait)) {
+ /* Check to see if the core is hung */
+ if (adreno_ft_detect(device, regs))
+ return -ETIMEDOUT;
- if (adreno_dev->drawctxt_active)
- kgsl_context_put(&adreno_dev->drawctxt_active->base);
-
- adreno_dev->drawctxt_active = NULL;
-
- /* Stop the ringbuffer */
- adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
-
- if (kgsl_pwrctrl_isenabled(device))
- device->ftbl->irqctrl(device, 0);
-
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-
- adreno_set_gpu_fault(adreno_dev, 0);
-
- /* Delete the idle timer */
- del_timer_sync(&device->idle_timer);
-
- /* Make sure we are totally awake */
- kgsl_pwrctrl_enable(device);
-
- /* Reset the GPU */
- adreno_dev->gpudev->soft_reset(adreno_dev);
-
- /* Reinitialize the GPU */
- adreno_dev->gpudev->start(adreno_dev);
-
- /* Enable IRQ */
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
- device->ftbl->irqctrl(device, 1);
-
- /*
- * If we have offsets for the jump tables we can try to do a warm start,
- * otherwise do a full ringbuffer restart
- */
-
- if (adreno_dev->pm4_jt_idx)
- ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
- else
- ret = adreno_ringbuffer_start(&adreno_dev->ringbuffer);
-
- if (ret)
- return ret;
-
- device->reset_counter++;
+ wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ }
+ rptr = adreno_get_rptr(rb);
+ if (time_after(jiffies, timeout)) {
+ KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
+ rptr, rb->wptr);
+ return -ETIMEDOUT;
+ }
+ } while (rptr != rb->wptr);
return 0;
}
-/*
- * adreno_isidle() - return true if the GPU hardware is idle
- * @device: Pointer to the KGSL device structure for the GPU
- *
- * Return true if the GPU hardware is idle and there are no commands pending in
- * the ringbuffer
- */
-bool adreno_isidle(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int rptr;
-
- if (!kgsl_pwrctrl_isenabled(device))
- return true;
-
- rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
-
- if (rptr == adreno_dev->ringbuffer.wptr)
- return adreno_hw_isidle(device);
-
- return false;
-}
-
-/**
- * adreno_idle() - wait for the GPU hardware to go idle
- * @device: Pointer to the KGSL device structure for the GPU
- *
- * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
- */
-
+/* Caller must hold the device mutex. */
int adreno_idle(struct kgsl_device *device)
{
+ unsigned long wait_time;
+ unsigned long wait_time_part;
+ unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- /*
- * Make sure the device mutex is held so the dispatcher can't send any
- * more commands to the hardware
- */
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
- BUG_ON(!mutex_is_locked(&device->mutex));
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x00000000, 0x80000000);
- if (adreno_is_a3xx(adreno_dev))
- kgsl_cffdump_regpoll(device,
- adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
- 0x00000000, 0x80000000);
- else
- kgsl_cffdump_regpoll(device,
- adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
- 0x110, 0x110);
+retry:
+ /* First, wait for the ringbuffer to drain */
+ if (adreno_ringbuffer_drain(device, prev_reg_val))
+ goto err;
- while (time_before(jiffies, wait)) {
- /*
- * If we fault, stop waiting and return an error. The dispatcher
- * will clean up the fault from the work queue, but we need to
- * make sure we don't block it by waiting for an idle that
- * will never come.
- */
+ /* now, wait for the GPU to finish its operations */
+ wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
- if (adreno_gpu_fault(adreno_dev) != 0)
- return -EDEADLK;
-
+ while (time_before(jiffies, wait_time)) {
if (adreno_isidle(device))
return 0;
+
+ /* Don't wait for the full timeout, detect a hang faster. */
+ if (time_after(jiffies, wait_time_part)) {
+ wait_time_part = jiffies +
+ msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ if ((adreno_ft_detect(device, prev_reg_val)))
+ goto err;
+ }
+
}
+err:
+ KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
+ if (KGSL_STATE_DUMP_AND_FT != device->state &&
+ !adreno_dump_and_exec_ft(device)) {
+ wait_time = jiffies + ADRENO_IDLE_TIMEOUT;
+ goto retry;
+ }
return -ETIMEDOUT;
}
/**
- * adreno_drain() - Drain the dispatch queue
- * @device: Pointer to the KGSL device structure for the GPU
- *
- * Tell the dispatcher to pause - this has the effect of draining the inflight
- * command batches
+ * is_adreno_rbbm_status_idle - Check if GPU core is idle by probing
+ * rbbm_status register
+ * @device - Pointer to the GPU device whose idle status is to be
+ * checked
+ * @returns - Whether the core is idle (based on rbbm_status):
+ * false if the core is active, true if the core is idle
*/
-static int adreno_drain(struct kgsl_device *device)
+static bool is_adreno_rbbm_status_idle(struct kgsl_device *device)
{
+ unsigned int reg_rbbm_status;
+ bool status = false;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- adreno_dispatcher_pause(adreno_dev);
- return 0;
+ /* Is the core idle? */
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
+ &reg_rbbm_status);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (reg_rbbm_status == 0x110)
+ status = true;
+ } else {
+ if (!(reg_rbbm_status & 0x80000000))
+ status = true;
+ }
+ return status;
+}
+
+static unsigned int adreno_isidle(struct kgsl_device *device)
+{
+ int status = false;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ /* If the device isn't active, don't force it on. */
+ if (kgsl_pwrctrl_isenabled(device)) {
+ /* Is the ring buffer empty? */
+ unsigned int rptr = adreno_get_rptr(rb);
+ if (rptr == rb->wptr) {
+ /*
+ * Are there interrupts pending? If so then pretend we
+ * are not idle - this avoids the possibility that we go
+ * to a lower power state without handling interrupts
+ * first.
+ */
+
+ if (!adreno_dev->gpudev->irq_pending(adreno_dev)) {
+ /* Is the core idle? */
+ status = is_adreno_rbbm_status_idle(device);
+ }
+ }
+ } else {
+ status = true;
+ }
+ return status;
}
/* Caller must hold the device mutex. */
@@ -2597,6 +3423,342 @@
__raw_writel(value, reg);
}
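+/*
+ * Map a kgsl context to the memstore index used for its timestamps:
+ * KGSL_MEMSTORE_GLOBAL for NULL or legacy contexts, the context id for
+ * per-context timestamps, or KGSL_CONTEXT_INVALID if the context has
+ * been detached.
+ */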
+static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
+{
+ unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
+
+ if (k_ctxt != NULL) {
+ struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
+ if (kgsl_context_detached(k_ctxt))
+ context_id = KGSL_CONTEXT_INVALID;
+ else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ context_id = k_ctxt->id;
+ }
+
+ return context_id;
+}
+
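+/*
+ * Check whether 'timestamp' has already retired; if not, record it as the
+ * reference wait timestamp in the memstore (keeping the earliest pending
+ * timestamp) and, when enabling the comparison for the first time, submit
+ * a dummy command so an interrupt is guaranteed once it passes.
+ */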
+static unsigned int adreno_check_hw_ts(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ int status = 0;
+ unsigned int ref_ts, enableflag;
+ unsigned int context_id = _get_context_id(context);
+
+ /*
+ * If the context ID is invalid, we are in a race with
+ * the context being destroyed by userspace so bail.
+ */
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ return -EINVAL;
+ }
+
+ status = kgsl_check_timestamp(device, context, timestamp);
+ if (status)
+ return status;
+
+ kgsl_sharedmem_readl(&device->memstore, &enableflag,
+ KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
+ /*
+ * Barrier is needed here to make sure the read from memstore
+ * has posted
+ */
+
+ mb();
+
+ if (enableflag) {
+ kgsl_sharedmem_readl(&device->memstore, &ref_ts,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts));
+
+ /* Make sure the memstore read has posted */
+ mb();
+ if (timestamp_cmp(ref_ts, timestamp) >= 0) {
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts), timestamp);
+ /* Make sure the memstore write is posted */
+ wmb();
+ }
+ } else {
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts), timestamp);
+ enableflag = 1;
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ts_cmp_enable), enableflag);
+
+ /* Make sure the memstore write gets posted */
+ wmb();
+
+ /*
+ * submit a dummy packet so that even if all
+ * commands up to the timestamp get executed we will still
+ * get an interrupt
+ */
+
+ if (context && device->state != KGSL_STATE_SLUMBER) {
+ adreno_ringbuffer_issuecmds(device,
+ ADRENO_CONTEXT(context),
+ KGSL_CMD_FLAGS_GET_INT, NULL, 0);
+ }
+ }
+
+ return 0;
+}
+
+/* Return 1 if the event timestamp has already passed, 0 if it was marked */
+static int adreno_next_event(struct kgsl_device *device,
+ struct kgsl_event *event)
+{
+ return adreno_check_hw_ts(device, event->context, event->timestamp);
+}
+
+static int adreno_check_interrupt_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ int status;
+
+ mutex_lock(&device->mutex);
+ status = adreno_check_hw_ts(device, context, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return status;
+}
+
+/*
+ wait_event_interruptible_timeout checks for the exit condition before
+ placing a process in wait q. For conditional interrupts we expect the
+ process to already be in its wait q when its exit condition checking
+ function is called.
+*/
+#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
+({ \
+ long __ret = timeout; \
+ if (io) \
+ __wait_io_event_interruptible_timeout(wq, condition, __ret);\
+ else \
+ __wait_event_interruptible_timeout(wq, condition, __ret);\
+ __ret; \
+})
+
+
+
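+/*
+ * Periodic hang detection: compare the hang-detect registers and the
+ * global timestamp against their values from the previous
+ * KGSL_TIMEOUT_PART interval. Returns 1 when a fast hang or a
+ * long-running IB is detected, 0 otherwise.
+ */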
+unsigned int adreno_ft_detect(struct kgsl_device *device,
+ unsigned int *prev_reg_val)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int curr_reg_val[FT_DETECT_REGS_COUNT];
+ unsigned int fast_hang_detected = 1;
+ unsigned int long_ib_detected = 1;
+ unsigned int i;
+ static unsigned long next_hang_detect_time;
+ static unsigned int prev_global_ts;
+ unsigned int curr_global_ts = 0;
+ unsigned int curr_context_id = 0;
+ static struct adreno_context *curr_context;
+ static struct kgsl_context *context;
+ static char pid_name[TASK_COMM_LEN] = "unknown";
+
+ if (!adreno_dev->fast_hang_detect)
+ fast_hang_detected = 0;
+
+ if (!adreno_dev->long_ib_detect)
+ long_ib_detected = 0;
+
+ if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
+ return 0;
+
+ if (is_adreno_rbbm_status_idle(device) &&
+ (kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED)
+ == rb->global_ts)) {
+
+ /*
+ * On A2XX if the RPTR != WPTR and the device is idle, then
+ * the last write to WPTR probably failed to latch so write it
+ * again
+ */
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ unsigned int rptr;
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
+ &rptr);
+ if (rptr != adreno_dev->ringbuffer.wptr)
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_CP_RB_WPTR,
+ adreno_dev->ringbuffer.wptr);
+ }
+
+ return 0;
+ }
+
+ /*
+ * The interval between hang detection checks should be KGSL_TIMEOUT_PART
+ * or more; if the next hang detection is requested < KGSL_TIMEOUT_PART
+ * after the last one, do nothing.
+ */
+ if ((next_hang_detect_time) &&
+ (time_before(jiffies, next_hang_detect_time)))
+ return 0;
+ else
+ next_hang_detect_time = (jiffies +
+ msecs_to_jiffies(KGSL_TIMEOUT_PART-1));
+
+ /* Read the current Hang detect reg values here */
+ for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+ if (ft_detect_regs[i] == 0)
+ continue;
+ kgsl_regread(device, ft_detect_regs[i],
+ &curr_reg_val[i]);
+ }
+
+ /* Read the current global timestamp here */
+ kgsl_sharedmem_readl(&device->memstore,
+ &curr_global_ts,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+ /* Make sure the memstore read has posted */
+ mb();
+
+ if (curr_global_ts == prev_global_ts) {
+
+ /* If we don't already have a good context, get it. */
+ if (kgsl_context_detached(context)) {
+ kgsl_context_put(context);
+ context = NULL;
+ curr_context = NULL;
+ strlcpy(pid_name, "unknown", sizeof(pid_name));
+
+ kgsl_sharedmem_readl(&device->memstore,
+ &curr_context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+ /* Make sure the memstore read has posted */
+ mb();
+
+ context = kgsl_context_get(device, curr_context_id);
+ if (context != NULL) {
+ struct task_struct *task;
+ curr_context = ADRENO_CONTEXT(context);
+ curr_context->ib_gpu_time_used = 0;
+ task = find_task_by_vpid(context->pid);
+ if (task)
+ get_task_comm(pid_name, task);
+ } else {
+ KGSL_DRV_ERR(device,
+ "Fault tolerance no context found\n");
+ }
+ }
+ for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+ if (curr_reg_val[i] != prev_reg_val[i]) {
+ fast_hang_detected = 0;
+
+ /* Check for long IB here */
+ if ((i >=
+ LONG_IB_DETECT_REG_INDEX_START)
+ &&
+ (i <=
+ LONG_IB_DETECT_REG_INDEX_END))
+ long_ib_detected = 0;
+ }
+ }
+
+ if (fast_hang_detected) {
+ KGSL_FT_ERR(device,
+ "Proc %s, ctxt_id %d ts %d triggered fault tolerance"
+ " on global ts %d\n",
+ pid_name, context ? context->id : 0,
+ (kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED) + 1),
+ curr_global_ts + 1);
+ return 1;
+ }
+
+ if (curr_context != NULL) {
+
+ curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
+ KGSL_FT_INFO(device,
+ "Proc %s used GPU Time %d ms on timestamp 0x%X\n",
+ pid_name, curr_context->ib_gpu_time_used,
+ curr_global_ts+1);
+
+ if ((long_ib_detected) &&
+ (!(curr_context->flags &
+ CTXT_FLAGS_NO_FAULT_TOLERANCE))) {
+ curr_context->ib_gpu_time_used +=
+ KGSL_TIMEOUT_PART;
+ if (curr_context->ib_gpu_time_used >
+ KGSL_TIMEOUT_LONG_IB_DETECTION) {
+ if (adreno_dev->long_ib_ts !=
+ curr_global_ts) {
+ KGSL_FT_ERR(device,
+ "Proc %s, ctxt_id %d ts %d"
+ "used GPU for %d ms long ib "
+ "detected on global ts %d\n",
+ pid_name, context->id,
+ (kgsl_readtimestamp(device,
+ context,
+ KGSL_TIMESTAMP_RETIRED)+1),
+ curr_context->ib_gpu_time_used,
+ curr_global_ts+1);
+ adreno_dev->long_ib = 1;
+ adreno_dev->long_ib_ts =
+ curr_global_ts;
+ curr_context->ib_gpu_time_used =
+ 0;
+ return 1;
+ }
+ }
+ }
+ }
+ } else {
+ /* GPU is moving forward */
+ prev_global_ts = curr_global_ts;
+ kgsl_context_put(context);
+ context = NULL;
+ curr_context = NULL;
+ strlcpy(pid_name, "unknown", sizeof(pid_name));
+ adreno_dev->long_ib = 0;
+ adreno_dev->long_ib_ts = 0;
+ }
+
+
+ /* If no hang is detected copy the current reg values
+ * to the previous values and return no hang */
+ for (i = 0; i < FT_DETECT_REGS_COUNT; i++)
+ prev_reg_val[i] = curr_reg_val[i];
+ return 0;
+}
+
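+/*
+ * Reject waits on timestamps that have not been issued yet: returns 0 if
+ * 'timestamp' is at or behind the last issued timestamp, otherwise logs
+ * the invalid wait once per context and returns -EINVAL.
+ */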
+static int _check_pending_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int context_id = _get_context_id(context);
+ unsigned int ts_issued;
+
+ if (context_id == KGSL_CONTEXT_INVALID)
+ return -EINVAL;
+
+ ts_issued = adreno_context_timestamp(context, &adreno_dev->ringbuffer);
+
+ if (timestamp_cmp(timestamp, ts_issued) <= 0)
+ return 0;
+
+ if (context && !context->wait_on_invalid_ts) {
+ KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, last issued ts <%d:0x%x>\n",
+ context_id, timestamp, context_id, ts_issued);
+
+ /* Only print this message once */
+ context->wait_on_invalid_ts = true;
+ }
+
+ return -EINVAL;
+}
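_check_pending_timestamp() leans on timestamp_cmp() to order 32-bit timestamps that may wrap around. The ordering reduces to the usual unsigned-difference trick, sketched here under the assumption of a half-range window (the in-tree helper uses its own window constant):

/* Sketch: 0 if equal, 1 if a is "newer" than b, -1 otherwise. */
static inline int ts_cmp_sketch(unsigned int a, unsigned int b)
{
	if (a == b)
		return 0;
	/* an unsigned difference below half the range means "a is ahead",
	 * and this stays true across 32-bit wrap-around */
	return ((a - b) < 0x80000000u) ? 1 : -1;
}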
+
/**
* adreno_waittimestamp - sleep while waiting for the specified timestamp
* @device - pointer to a KGSL device structure
@@ -2604,35 +3766,147 @@
* @timestamp - GPU timestamp to wait for
* @msecs - amount of time to wait (in milliseconds)
*
- * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
+ * Wait 'msecs' milliseconds for the specified timestamp to expire. Wake up
+ * every KGSL_TIMEOUT_PART milliseconds to check for a device hang and process
+ * one if it happened. Otherwise, spend most of our time in an interruptible
+ * wait for the timestamp interrupt to be processed. This function must be
+ * called with the mutex already held.
*/
static int adreno_waittimestamp(struct kgsl_device *device,
- struct kgsl_context *context,
- unsigned int timestamp,
- unsigned int msecs)
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
{
- int ret;
- struct adreno_context *drawctxt;
+ static unsigned int io_cnt;
+ struct adreno_context *adreno_ctx = context ? ADRENO_CONTEXT(context) :
+ NULL;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int context_id = _get_context_id(context);
+ unsigned int time_elapsed = 0;
+ unsigned int wait;
+ int ts_compare = 1;
+ int io, ret = -ETIMEDOUT;
- if (context == NULL) {
- /* If they are doing then complain once */
- dev_WARN_ONCE(device->dev, 1,
- "IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
return -EINVAL;
}
- /* Return -EINVAL if the context has been detached */
- if (kgsl_context_detached(context))
- return -EINVAL;
+ /*
+ * Check to see if the requested timestamp is "newer" than the last
+ * timestamp issued. If it is, complain once and return an error. Only
+ * print the message once per context so that badly behaving
+ * applications don't spam the logs
+ */
- ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
- timestamp, msecs_to_jiffies(msecs));
+ if (adreno_ctx && !(adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS)) {
+ if (_check_pending_timestamp(device, context, timestamp))
+ return -EINVAL;
- /* If the context got invalidated then return a specific error */
- drawctxt = ADRENO_CONTEXT(context);
+ /* Reset the invalid timestamp flag on a valid wait */
+ context->wait_on_invalid_ts = false;
+ }
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
- ret = -EDEADLK;
+ /*
+ * On the first time through the loop only wait 100ms. This gives
+ * enough time for the engine to start moving and oddly provides
+ * better hang detection results than going the full KGSL_TIMEOUT_PART
+ * right off the bat. The exception: if msecs is less than 100ms,
+ * use msecs or 20ms, whichever is larger, because anything below
+ * 20ms is unreliable
+ */
+
+ if (msecs == 0 || msecs >= 100)
+ wait = 100;
+ else
+ wait = (msecs > 20) ? msecs : 20;
+
+ do {
+ long status;
+
+ /*
+ * If the timestamp expires while we're not
+ * waiting, there's a chance that an interrupt
+ * will not be generated, so the timestamp
+ * work needs to be queued explicitly.
+ */
+
+ if (kgsl_check_timestamp(device, context, timestamp)) {
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ ret = 0;
+ break;
+ }
+
+ /*
+ * For proper power accounting sometimes we need to call
+ * io_wait_interruptible_timeout and sometimes we need to call
+ * plain old wait_interruptible_timeout. We call the regular
+ * timeout N times out of 100, where N is a number specified by
+ * the current power level
+ */
+
+ io_cnt = (io_cnt + 1) % 100;
+ io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+ ? 0 : 1;
+
+ mutex_unlock(&device->mutex);
+
+ /* Wait for a timestamp event */
+ status = kgsl_wait_event_interruptible_timeout(
+ device->wait_queue,
+ adreno_check_interrupt_timestamp(device, context,
+ timestamp), msecs_to_jiffies(wait), io);
+
+ mutex_lock(&device->mutex);
+
+ /*
+ * If status is non-zero then either the condition was satisfied
+ * or there was an error. In either event, this is the end of
+ * the line for us
+ */
+
+ if (status != 0) {
+ ret = (status > 0) ? 0 : (int) status;
+ break;
+ }
+ time_elapsed += wait;
+
+ /* If user-specified timestamps are being used, wait at least
+ * KGSL_SYNCOBJ_SERVER_TIMEOUT msecs for the user driver to
+ * issue an IB for a timestamp before checking whether the
+ * current timestamp we are waiting for is valid or not
+ */
+
+ if (ts_compare && (adreno_ctx &&
+ (adreno_ctx->flags & CTXT_FLAGS_USER_GENERATED_TS))) {
+ if (time_elapsed > KGSL_SYNCOBJ_SERVER_TIMEOUT) {
+ ret = _check_pending_timestamp(device, context,
+ timestamp);
+ if (ret)
+ break;
+
+ /* Don't do this check again */
+ ts_compare = 0;
+
+ /*
+ * Reset the invalid timestamp flag on a valid
+ * wait
+ */
+ context->wait_on_invalid_ts = false;
+ }
+ }
+
+ /*
+ * We want to wait for the minimum of KGSL_TIMEOUT_PART
+ * and (msecs - time_elapsed).
+ */
+
+ if (KGSL_TIMEOUT_PART < (msecs - time_elapsed))
+ wait = KGSL_TIMEOUT_PART;
+ else
+ wait = (msecs - time_elapsed);
+
+ } while (!msecs || time_elapsed < msecs);
return ret;
}
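Two heuristics in the rewritten wait loop are easy to lose in the diff: the clamped first poll interval and the io/regular wait split used for power accounting. Both reduce to a few lines, sketched here with illustrative helper names:

/* Sketch: first poll is 100 ms by default and never below 20 ms. */
static unsigned int first_wait(unsigned int msecs)
{
	if (msecs == 0 || msecs >= 100)
		return 100;
	return (msecs > 20) ? msecs : 20;
}

/* Sketch: io flag for kgsl_wait_event_interruptible_timeout(); the regular
 * (non-io) wait is chosen io_fraction times out of every 100 calls. */
static int pick_io_wait(unsigned int *io_cnt, unsigned int io_fraction)
{
	*io_cnt = (*io_cnt + 1) % 100;
	return (*io_cnt < io_fraction) ? 0 : 1;
}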
@@ -2641,13 +3915,13 @@
struct kgsl_context *context, enum kgsl_timestamp_type type)
{
unsigned int timestamp = 0;
- unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
+ unsigned int context_id = _get_context_id(context);
/*
- * If the context is detached we are in a race with
+ * If the context ID is invalid, we are in a race with
* the context being destroyed by userspace so bail.
*/
- if (context && kgsl_context_detached(context)) {
+ if (context_id == KGSL_CONTEXT_INVALID) {
KGSL_DRV_WARN(device, "context was detached");
return timestamp;
}
@@ -2661,11 +3935,11 @@
}
case KGSL_TIMESTAMP_CONSUMED:
kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(id, soptimestamp));
+ KGSL_MEMSTORE_OFFSET(context_id, soptimestamp));
break;
case KGSL_TIMESTAMP_RETIRED:
kgsl_sharedmem_readl(&device->memstore, &timestamp,
- KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
+ KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
break;
}
@@ -2825,7 +4099,6 @@
.gpuid = adreno_gpuid,
.snapshot = adreno_snapshot,
.irq_handler = adreno_irq_handler,
- .drain = adreno_drain,
/* Optional functions */
.setstate = adreno_setstate,
.drawctxt_create = adreno_drawctxt_create,
@@ -2833,7 +4106,7 @@
.drawctxt_destroy = adreno_drawctxt_destroy,
.setproperty = adreno_setproperty,
.postmortem_dump = adreno_dump,
- .drawctxt_sched = adreno_drawctxt_sched,
+ .next_event = adreno_next_event,
};
static struct platform_driver adreno_platform_driver = {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 3a19a17..72f15e7 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -35,11 +35,12 @@
#define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
/* Flags to control command packet settings */
-#define KGSL_CMD_FLAGS_NONE 0
-#define KGSL_CMD_FLAGS_PMODE BIT(0)
-#define KGSL_CMD_FLAGS_INTERNAL_ISSUE BIT(1)
-#define KGSL_CMD_FLAGS_WFI BIT(2)
-#define KGSL_CMD_FLAGS_PROFILE BIT(3)
+#define KGSL_CMD_FLAGS_NONE 0x00000000
+#define KGSL_CMD_FLAGS_PMODE 0x00000001
+#define KGSL_CMD_FLAGS_INTERNAL_ISSUE 0x00000002
+#define KGSL_CMD_FLAGS_GET_INT 0x00000004
+#define KGSL_CMD_FLAGS_PROFILE 0x00000008
+#define KGSL_CMD_FLAGS_EOF 0x00000100
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
@@ -95,51 +96,6 @@
TRACE_BUS_CTL,
};
-#define ADRENO_SOFT_FAULT 1
-#define ADRENO_HARD_FAULT 2
-#define ADRENO_TIMEOUT_FAULT 3
-
-/*
- * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
- * smaller then this but this size will allow for a larger range of inflight
- * sizes that can be chosen at runtime
- */
-
-#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
-
-/**
- * struct adreno_dispatcher - container for the adreno GPU dispatcher
- * @mutex: Mutex to protect the structure
- * @state: Current state of the dispatcher (active or paused)
- * @timer: Timer to monitor the progress of the command batches
- * @inflight: Number of command batch operations pending in the ringbuffer
- * @fault: Non-zero if a fault was detected.
- * @pending: Priority list of contexts waiting to submit command batches
- * @plist_lock: Spin lock to protect the pending queue
- * @cmdqueue: Queue of command batches currently flight
- * @head: pointer to the head of of the cmdqueue. This is the oldest pending
- * operation
- * @tail: pointer to the tail of the cmdqueue. This is the most recently
- * submitted operation
- * @work: work_struct to put the dispatcher in a work queue
- * @kobj: kobject for the dispatcher directory in the device sysfs node
- */
-struct adreno_dispatcher {
- struct mutex mutex;
- unsigned int state;
- struct timer_list timer;
- struct timer_list fault_timer;
- unsigned int inflight;
- atomic_t fault;
- struct plist_head pending;
- spinlock_t plist_lock;
- struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
- unsigned int head;
- unsigned int tail;
- struct work_struct work;
- struct kobject kobj;
-};
-
struct adreno_gpudev;
struct adreno_device {
@@ -180,7 +136,6 @@
unsigned int ocmem_base;
unsigned int gpu_cycles;
struct adreno_profile profile;
- struct adreno_dispatcher dispatcher;
};
#define PERFCOUNTER_FLAG_NONE 0x0
@@ -311,9 +266,9 @@
/* GPU specific function hooks */
int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
- int (*ctxt_save)(struct adreno_device *, struct adreno_context *);
- int (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
- int (*ctxt_draw_workaround)(struct adreno_device *,
+ void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
+ void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
+ void (*ctxt_draw_workaround)(struct adreno_device *,
struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
@@ -336,6 +291,46 @@
void (*postmortem_dump)(struct adreno_device *adreno_dev);
};
+/**
+ * struct adreno_ft_data - Structure that contains all information needed to
+ * perform GPU fault tolerance
+ * @ib1 - IB1 that the GPU was executing when the hang happened
+ * @context_id - Context which caused the hang
+ * @global_eop - eoptimestamp at the time of the hang
+ * @rb_buffer - Buffer that holds the commands from good contexts
+ * @rb_size - Number of valid dwords in rb_buffer
+ * @bad_rb_buffer - Buffer that holds commands from the hanging context
+ * @bad_rb_size - Number of valid dwords in bad_rb_buffer
+ * @good_rb_buffer - Buffer that holds commands from good contexts
+ * @good_rb_size - Number of valid dwords in good_rb_buffer
+ * @last_valid_ctx_id - The last context from which commands were placed in
+ * the ringbuffer before the GPU hung
+ * @status - Current fault tolerance step being executed
+ * @ft_policy - Fault tolerance policy to apply
+ * @err_code - Fault tolerance error code
+ * @start_of_replay_cmds - Offset in the ringbuffer from where commands can be
+ * replayed during fault tolerance
+ * @replay_for_snapshot - Offset in the ringbuffer where IBs can be saved for
+ * replaying with snapshot
+ */
+struct adreno_ft_data {
+ unsigned int ib1;
+ unsigned int context_id;
+ unsigned int global_eop;
+ unsigned int *rb_buffer;
+ unsigned int rb_size;
+ unsigned int *bad_rb_buffer;
+ unsigned int bad_rb_size;
+ unsigned int *good_rb_buffer;
+ unsigned int good_rb_size;
+ unsigned int last_valid_ctx_id;
+ unsigned int status;
+ unsigned int ft_policy;
+ unsigned int err_code;
+ unsigned int start_of_replay_cmds;
+ unsigned int replay_for_snapshot;
+};
+
#define FT_DETECT_REGS_COUNT 12
struct log_field {
@@ -344,16 +339,13 @@
};
/* Fault Tolerance policy flags */
-#define KGSL_FT_OFF 0
-#define KGSL_FT_REPLAY 1
-#define KGSL_FT_SKIPIB 2
-#define KGSL_FT_SKIPFRAME 3
-#define KGSL_FT_DISABLE 4
-#define KGSL_FT_TEMP_DISABLE 5
-#define KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))
-
-/* This internal bit is used to skip the PM dump on replayed command batches */
-#define KGSL_FT_SKIP_PMDUMP 31
+#define KGSL_FT_OFF BIT(0)
+#define KGSL_FT_REPLAY BIT(1)
+#define KGSL_FT_SKIPIB BIT(2)
+#define KGSL_FT_SKIPFRAME BIT(3)
+#define KGSL_FT_DISABLE BIT(4)
+#define KGSL_FT_TEMP_DISABLE BIT(5)
+#define KGSL_FT_DEFAULT_POLICY (KGSL_FT_REPLAY | KGSL_FT_SKIPIB)
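Note the semantic shift here: these flags used to be bit positions fed to BIT() and set_bit(), and are now masks that are combined and tested directly. A minimal usage sketch built on the masks defined above (the helper name is illustrative):

/* Sketch: with mask-valued flags, policy checks are plain bitwise tests. */
static int ft_allows_replay(unsigned int ft_policy)
{
	if (ft_policy & (KGSL_FT_DISABLE | KGSL_FT_TEMP_DISABLE))
		return 0;
	return !!(ft_policy & KGSL_FT_REPLAY);
}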
/* Pagefault policy flags */
#define KGSL_FT_PAGEFAULT_INT_ENABLE BIT(0)
@@ -363,14 +355,6 @@
#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY (KGSL_FT_PAGEFAULT_INT_ENABLE + \
KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
-#define ADRENO_FT_TYPES \
- { BIT(KGSL_FT_OFF), "off" }, \
- { BIT(KGSL_FT_REPLAY), "replay" }, \
- { BIT(KGSL_FT_SKIPIB), "skipib" }, \
- { BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
- { BIT(KGSL_FT_DISABLE), "disable" }, \
- { BIT(KGSL_FT_TEMP_DISABLE), "temp" }
-
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
@@ -400,7 +384,6 @@
int adreno_coresight_init(struct platform_device *pdev);
int adreno_idle(struct kgsl_device *device);
-bool adreno_isidle(struct kgsl_device *device);
void adreno_shadermem_regread(struct kgsl_device *device,
unsigned int offsetwords,
@@ -427,23 +410,13 @@
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
int hang);
-void adreno_dispatcher_start(struct adreno_device *adreno_dev);
-int adreno_dispatcher_init(struct adreno_device *adreno_dev);
-void adreno_dispatcher_close(struct adreno_device *adreno_dev);
-int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
- unsigned int timeout);
-void adreno_dispatcher_irq_fault(struct kgsl_device *device);
-void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
+int adreno_dump_and_exec_ft(struct kgsl_device *device);
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp);
+void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+ size_t len, int start, int size);
-void adreno_dispatcher_schedule(struct kgsl_device *device);
-void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
-void adreno_dispatcher_queue_context(struct kgsl_device *device,
- struct adreno_context *drawctxt);
-int adreno_reset(struct kgsl_device *device);
+unsigned int adreno_ft_detect(struct kgsl_device *device,
+ unsigned int *prev_reg_val);
int adreno_ft_init_sysfs(struct kgsl_device *device);
void adreno_ft_uninit_sysfs(struct kgsl_device *device);
@@ -560,7 +533,9 @@
{
if (k_ctxt) {
struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
- return a_ctxt->timestamp;
+
+ if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ return a_ctxt->timestamp;
}
return rb->global_ts;
}
@@ -759,31 +734,4 @@
return ADRENO_REG_REGISTER_MAX;
return adreno_dev->gpudev->reg_offsets->offsets[offset_name];
}
-
-/**
- * adreno_gpu_fault() - Return the current state of the GPU
- * @adreno_dev: A ponter to the adreno_device to query
- *
- * Return 0 if there is no fault or positive with the last type of fault that
- * occurred
- */
-static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
-{
- smp_rmb();
- return atomic_read(&adreno_dev->dispatcher.fault);
-}
-
-/**
- * adreno_set_gpu_fault() - Set the current fault status of the GPU
- * @adreno_dev: A pointer to the adreno_device to set
- * @state: fault state to set
- *
- */
-static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
- int state)
-{
- atomic_set(&adreno_dev->dispatcher.fault, state);
- smp_wmb();
-}
-
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index cce4f91..3d72c5c 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1451,7 +1451,7 @@
return ret;
}
-static int a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
@@ -1468,7 +1468,7 @@
ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
else
- return 0;
+ return;
/*
* Issue an empty draw call to avoid possible hangs due to
* repeated idles without intervening draw calls.
@@ -1499,46 +1499,41 @@
| adreno_dev->pix_shader_start;
}
- return adreno_ringbuffer_issuecmds(device, context,
- KGSL_CMD_FLAGS_PMODE, &cmd[0], cmds - cmd);
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
+ &cmd[0], cmds - cmd);
}
-static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
+static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
- int ret;
if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return 0;
+ return;
- if (context->state == ADRENO_CONTEXT_STATE_INVALID)
- return 0;
+ if (context->flags & CTXT_FLAGS_GPU_HANG)
+ KGSL_CTXT_WARN(device,
+ "Current active context has caused gpu hang\n");
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
context->reg_save[1],
context->reg_save[2] << 2, true);
/* save registers and constants. */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->reg_save, 3);
- if (ret)
- return ret;
-
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
kgsl_cffdump_syncmem(context->base.device,
&context->gpustate,
context->shader_save[1],
context->shader_save[2] << 2, true);
/* save shader partitioning and instructions. */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->shader_save, 3);
- if (ret)
- return ret;
kgsl_cffdump_syncmem(context->base.device,
&context->gpustate,
context->shader_fixup[1],
@@ -1547,13 +1542,10 @@
* fixup shader partitioning parameter for
* SET_SHADER_BASES.
*/
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_fixup, 3);
- if (ret)
- return ret;
-
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
}
}
@@ -1566,41 +1558,32 @@
/* save gmem.
* (note: changes shader. shader must already be saved.)
*/
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_save, 3);
- if (ret)
- return ret;
-
kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
context->chicken_restore[1],
context->chicken_restore[2] << 2, true);
/* Restore TP0_CHICKEN */
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
-
- if (ret)
- return ret;
}
adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
} else if (adreno_is_a2xx(adreno_dev))
- return a2xx_drawctxt_draw_workaround(adreno_dev, context);
-
- return 0;
+ a2xx_drawctxt_draw_workaround(adreno_dev, context);
}
-static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmds[5];
- int ret = 0;
if (context == NULL) {
/* No context - set the default pagetable and that's it */
@@ -1615,7 +1598,7 @@
: KGSL_CONTEXT_INVALID;
kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
id);
- return 0;
+ return;
}
cmds[0] = cp_nop_packet(1);
@@ -1624,11 +1607,8 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
- ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
cmds, 5);
- if (ret)
- return ret;
-
kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
context->base.id);
@@ -1641,11 +1621,9 @@
context->context_gmem_shadow.gmem_restore[2] << 2,
true);
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.gmem_restore, 3);
- if (ret)
- return ret;
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
kgsl_cffdump_syncmem(context->base.device,
@@ -1654,11 +1632,9 @@
context->chicken_restore[2] << 2, true);
/* Restore TP0_CHICKEN */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->chicken_restore, 3);
- if (ret)
- return ret;
}
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
@@ -1670,10 +1646,8 @@
context->reg_restore[2] << 2, true);
/* restore registers and constants. */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
- if (ret)
- return ret;
/* restore shader instructions & partitioning. */
if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
@@ -1682,22 +1656,18 @@
context->shader_restore[1],
context->shader_restore[2] << 2, true);
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
- if (ret)
- return ret;
}
}
if (adreno_is_a20x(adreno_dev)) {
cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
cmds[1] = context->bin_base_offset;
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, cmds, 2);
}
-
- return ret;
}
/*
@@ -1764,14 +1734,13 @@
if (!status) {
if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
- /*
- * This indicates that we could not read CP_INT_STAT.
- * As a precaution schedule the dispatcher to check
- * things out. Since we did not ack any interrupts this
- * interrupt will be generated again
- */
+ /* This indicates that we could not read CP_INT_STAT.
+ * As a precaution just wake up processes so
+ * they can check their timestamps. Since we
+ * did not ack any interrupts, this interrupt will
+ * be generated again */
KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
- adreno_dispatcher_schedule(device);
+ wake_up_interruptible_all(&device->wait_queue);
} else
KGSL_DRV_WARN(device, "Spurious interrput detected\n");
return;
@@ -1797,7 +1766,7 @@
if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
queue_work(device->work_queue, &device->ts_expired_ws);
- adreno_dispatcher_schedule(device);
+ wake_up_interruptible_all(&device->wait_queue);
}
}
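With the dispatcher gone, the CP interrupt now wakes the device wait queue that adreno_waittimestamp() sleeps on. The producer/consumer pairing is the standard wait-queue pattern, sketched below with generic stand-in names:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(ts_waitq);	/* stand-in for device->wait_queue */
static int ts_retired;				/* stand-in for the timestamp check */

/* Consumer: sleep until the condition holds or the timeout expires. */
static long wait_for_ts(unsigned long timeout_jiffies)
{
	return wait_event_interruptible_timeout(ts_waitq, ts_retired,
						timeout_jiffies);
}

/* Producer (IRQ path): make the condition true, then wake every waiter;
 * each waiter re-evaluates its own timestamp condition on wakeup. */
static void ts_irq(void)
{
	ts_retired = 1;
	wake_up_interruptible_all(&ts_waitq);
}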
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index c4f81fa..d96965c 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2382,38 +2382,32 @@
return ret;
}
-static int a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
- int ret;
if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
- return 0;
+ return;
- if (context->state == ADRENO_CONTEXT_STATE_INVALID)
- return 0;
+ if (context->flags & CTXT_FLAGS_GPU_HANG)
+ KGSL_CTXT_WARN(device,
+ "Current active context has caused gpu hang\n");
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
/* Fixup self modifying IBs for save operations */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
- if (ret)
- return ret;
/* save registers and constants. */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->regconstant_save, 3);
- if (ret)
- return ret;
if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
/* Save shader instructions */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
- if (ret)
- return ret;
context->flags |= CTXT_FLAGS_SHADER_RESTORE;
}
@@ -2431,25 +2425,19 @@
context->context_gmem_shadow.gmem_save[1],
context->context_gmem_shadow.gmem_save[2] << 2, true);
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_save, 3);
- if (ret)
- return ret;
-
context->flags |= CTXT_FLAGS_GMEM_RESTORE;
}
-
- return 0;
}
-static int a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
struct adreno_context *context)
{
struct kgsl_device *device = &adreno_dev->dev;
unsigned int cmds[5];
- int ret = 0;
if (context == NULL) {
/* No context - set the default pagetable and that's it */
@@ -2464,7 +2452,7 @@
: KGSL_CONTEXT_INVALID;
kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
id);
- return 0;
+ return;
}
cmds[0] = cp_nop_packet(1);
@@ -2473,11 +2461,8 @@
cmds[3] = device->memstore.gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
cmds[4] = context->base.id;
- ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
cmds, 5);
- if (ret)
- return ret;
-
kgsl_mmu_setstate(&device->mmu, context->base.pagetable,
context->base.id);
@@ -2493,47 +2478,36 @@
context->context_gmem_shadow.gmem_restore[2] << 2,
true);
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_PMODE,
context->context_gmem_shadow.
gmem_restore, 3);
- if (ret)
- return ret;
context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
}
if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
- if (ret)
- return ret;
/* Fixup self modifying IBs for restore operations */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->restore_fixup, 3);
- if (ret)
- return ret;
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->constant_restore, 3);
- if (ret)
- return ret;
if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->shader_restore, 3);
- if (ret)
- return ret;
+
/* Restore HLSQ_CONTROL_0 register */
- ret = adreno_ringbuffer_issuecmds(device, context,
+ adreno_ringbuffer_issuecmds(device, context,
KGSL_CMD_FLAGS_NONE,
context->hlsqcontrol_restore, 3);
}
-
- return ret;
}
static int a3xx_rb_init(struct adreno_device *adreno_dev,
@@ -2596,7 +2570,7 @@
/* Clear the error */
kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
- goto done;
+ return;
}
case A3XX_INT_RBBM_REG_TIMEOUT:
err = "RBBM: AHB register timeout";
@@ -2637,23 +2611,21 @@
case A3XX_INT_UCHE_OOB_ACCESS:
err = "UCHE: Out of bounds access";
break;
- default:
- return;
}
+
KGSL_DRV_CRIT(device, "%s\n", err);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
-
-done:
- /* Trigger a fault in the dispatcher - this will effect a restart */
- adreno_dispatcher_irq_fault(device);
}
static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
{
struct kgsl_device *device = &adreno_dev->dev;
+ /* Wake up everybody waiting for the interrupt */
+ wake_up_interruptible_all(&device->wait_queue);
+
+ /* Schedule work to free mem and issue ibs */
queue_work(device->work_queue, &device->ts_expired_ws);
- adreno_dispatcher_schedule(device);
}
/**
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
deleted file mode 100644
index 4d3172b..0000000
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ /dev/null
@@ -1,1646 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/jiffies.h>
-#include <linux/err.h>
-
-#include "kgsl.h"
-#include "adreno.h"
-#include "adreno_ringbuffer.h"
-#include "adreno_trace.h"
-
-#define ADRENO_DISPATCHER_ACTIVE 0
-#define ADRENO_DISPATCHER_PAUSE 1
-
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
-
-/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
-
-/* Number of milliseconds to wait for the context queue to clear */
-static unsigned int _context_queue_wait = 10000;
-
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
-
-/* Number of command batches inflight in the ringbuffer at any time */
-static unsigned int _dispatcher_inflight = 15;
-
-/* Command batch timeout (in milliseconds) */
-static unsigned int _cmdbatch_timeout = 2000;
-
-/* Interval for reading and comparing fault detection registers */
-static unsigned int _fault_timer_interval = 50;
-
-/* Local array for the current set of fault detect registers */
-static unsigned int fault_detect_regs[FT_DETECT_REGS_COUNT];
-
-/* The last retired global timestamp read during fault detect */
-static unsigned int fault_detect_ts;
-
-/**
- * fault_detect_read() - Read the set of fault detect registers
- * @device: Pointer to the KGSL device struct
- *
- * Read the set of fault detect registers and store them in the local array.
- * This is for the initial values that are compared later with
- * fault_detect_read_compare
- */
-static void fault_detect_read(struct kgsl_device *device)
-{
- int i;
-
- fault_detect_ts = kgsl_readtimestamp(device, NULL,
- KGSL_TIMESTAMP_RETIRED);
-
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- if (ft_detect_regs[i] == 0)
- continue;
- kgsl_regread(device, ft_detect_regs[i],
- &fault_detect_regs[i]);
- }
-}
-
-/*
- * Check to see if the device is idle and that the global timestamp is up to
- * date
- */
-static inline bool _isidle(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int ts;
-
- ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
-
- if (adreno_isidle(device) == true &&
- (ts >= adreno_dev->ringbuffer.global_ts))
- return true;
-
- return false;
-}
-
-/**
- * fault_detect_read_compare() - Read the fault detect registers and compare
- * them to the current value
- * @device: Pointer to the KGSL device struct
- *
- * Read the set of fault detect registers and compare them to the current set
- * of registers. Return 1 if any of the register values changed
- */
-static int fault_detect_read_compare(struct kgsl_device *device)
-{
- int i, ret = 0;
- unsigned int ts;
-
- /* Check to see if the device is idle - if so report no hang */
- if (_isidle(device) == true)
- ret = 1;
-
- for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
- unsigned int val;
-
- if (ft_detect_regs[i] == 0)
- continue;
- kgsl_regread(device, ft_detect_regs[i], &val);
- if (val != fault_detect_regs[i])
- ret = 1;
- fault_detect_regs[i] = val;
- }
-
- ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
- if (ts != fault_detect_ts)
- ret = 1;
-
- fault_detect_ts = ts;
-
- return ret;
-}
-
-/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
- */
-static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
- struct adreno_context *drawctxt)
-{
- struct kgsl_cmdbatch *cmdbatch = NULL;
-
- mutex_lock(&drawctxt->mutex);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- /*
- * Don't dequeue a cmdbatch that is still waiting for other
- * events
- */
- if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
- cmdbatch = ERR_PTR(-EAGAIN);
- goto done;
- }
-
- drawctxt->cmdqueue_head =
- CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
- drawctxt->queued--;
- }
-
-done:
- mutex_unlock(&drawctxt->mutex);
-
- return cmdbatch;
-}
-
-/**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
- * queue
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
- *
- * Failure to submit a command to the ringbuffer isn't the fault of the command
- * being submitted so if a failure happens, push it back on the head of the the
- * context queue to be reconsidered again
- */
-static inline void adreno_dispatcher_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
-{
- unsigned int prev;
- mutex_lock(&drawctxt->mutex);
-
- if (kgsl_context_detached(&drawctxt->base) ||
- drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- mutex_unlock(&drawctxt->mutex);
- return;
- }
-
- prev = drawctxt->cmdqueue_head - 1;
-
- if (prev < 0)
- prev = ADRENO_CONTEXT_CMDQUEUE_SIZE - 1;
-
- /*
- * The maximum queue size always needs to be one less then the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
- */
-
- BUG_ON(prev == drawctxt->cmdqueue_tail);
-
- drawctxt->cmdqueue[prev] = cmdbatch;
- drawctxt->queued++;
-
- /* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
- mutex_unlock(&drawctxt->mutex);
-}
-
-/**
- * dispatcher_queue_context() - Queue a context in the dispatcher pending list
- * @dispatcher: Pointer to the adreno dispatcher struct
- * @drawctxt: Pointer to the adreno draw context
- *
- * Add a context to the dispatcher pending list.
- */
-static void dispatcher_queue_context(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- spin_lock(&dispatcher->plist_lock);
-
- if (plist_node_empty(&drawctxt->pending)) {
- /* Get a reference to the context while it sits on the list */
- _kgsl_context_get(&drawctxt->base);
- trace_dispatch_queue_context(drawctxt);
- plist_add(&drawctxt->pending, &dispatcher->pending);
- }
-
- spin_unlock(&dispatcher->plist_lock);
-}
-
-/**
- * sendcmd() - Send a command batch to the GPU hardware
- * @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
- *
- * Send a KGSL command batch to the GPU hardware
- */
-static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int ret;
-
- dispatcher->inflight++;
-
- mutex_lock(&device->mutex);
-
- if (dispatcher->inflight == 1) {
- /* Time to make the donuts. Turn on the GPU */
- ret = kgsl_active_count_get(device);
- if (ret) {
- dispatcher->inflight--;
- mutex_unlock(&device->mutex);
- return ret;
- }
- }
-
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
-
- /*
- * On the first command, if the submission was successful, then read the
- * fault registers. If it failed then turn off the GPU. Sad face.
- */
-
- if (dispatcher->inflight == 1) {
- if (ret == 0)
- fault_detect_read(device);
- else
- kgsl_active_count_put(device);
- }
-
- mutex_unlock(&device->mutex);
-
- if (ret) {
- dispatcher->inflight--;
- KGSL_DRV_ERR(device,
- "Unable to submit command to the ringbuffer\n");
- return ret;
- }
-
- trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
-
- dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
- dispatcher->tail = (dispatcher->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
-
- /*
- * If this is the first command in the pipe then the GPU will
- * immediately start executing it so we can start the expiry timeout on
- * the command batch here. Subsequent command batches will have their
- * timer started when the previous command batch is retired
- */
- if (dispatcher->inflight == 1) {
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(_cmdbatch_timeout);
- mod_timer(&dispatcher->timer, cmdbatch->expires);
-
- /* Start the fault detection timer */
- if (adreno_dev->fast_hang_detect)
- mod_timer(&dispatcher->fault_timer,
- jiffies +
- msecs_to_jiffies(_fault_timer_interval));
- }
-
- return 0;
-}
-
-/**
- * dispatcher_context_sendcmds() - Send commands from a context to the GPU
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno context to dispatch commands from
- *
- * Dequeue and send a burst of commands from the specified context to the GPU
- */
-static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int count = 0;
- int requeued = 0;
-
- /*
- * Each context can send a specific number of command batches per cycle
- */
- while ((count < _context_cmdbatch_burst) &&
- (dispatcher->inflight < _dispatcher_inflight)) {
- int ret;
- struct kgsl_cmdbatch *cmdbatch;
-
- if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
- break;
-
- if (adreno_gpu_fault(adreno_dev) != 0)
- break;
-
- cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
-
- if (cmdbatch == NULL)
- break;
-
- /*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
- * When the sync points are satisfied then the context will get
- * reqeueued
- */
-
- if (IS_ERR(cmdbatch) && PTR_ERR(cmdbatch) == -EAGAIN) {
- requeued = 1;
- break;
- }
-
- /*
- * If this is a synchronization submission then there are no
- * commands to submit. Discard it and get the next item from
- * the queue. Decrement count so this packet doesn't count
- * against the burst for the context
- */
-
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
- kgsl_cmdbatch_destroy(cmdbatch);
- continue;
- }
-
- ret = sendcmd(adreno_dev, cmdbatch);
-
- /*
- * There are various reasons why we can't submit a command (no
- * memory for the commands, full ringbuffer, etc) but none of
- * these are actually the current command's fault. Requeue it
- * back on the context and let it come back around again if
- * conditions improve
- */
- if (ret) {
- adreno_dispatcher_requeue_cmdbatch(drawctxt, cmdbatch);
- requeued = 1;
- break;
- }
- count++;
- }
-
- /*
- * If the context successfully submitted commands, then
- * unconditionally put it back on the queue to be considered the
- * next time around. This might seem a little wasteful but it is
- * reasonable to think that a busy context will stay busy.
- */
-
- if (count || requeued) {
- dispatcher_queue_context(adreno_dev, drawctxt);
-
- /*
- * If we submitted something there will be room in the
- * context queue so ping the context wait queue on the
- * chance that the context is snoozing
- */
-
- wake_up_interruptible_all(&drawctxt->wq);
- }
-
- /* Return the number of command batches processed */
- if (count > 0)
- return count;
-
- /*
- * If we didn't process anything because of a stall or an error
- * return -1 so the issuecmds loop knows that we shouldn't
- * keep trying to process it
- */
-
- return requeued ? -1 : 0;
-}
-
-static void plist_move(struct plist_head *old, struct plist_head *new)
-{
- plist_head_init(new);
- list_splice_tail(&old->node_list, &new->node_list);
- plist_head_init(old);
-}
-
-/**
- * _adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
- * @adreno_dev: Pointer to the adreno device struct
- *
- * Issue as many commands as possible (up to inflight) from the pending contexts
- * This function assumes the dispatcher mutex has been locked.
- */
-static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct plist_head tmp;
- struct adreno_context *drawctxt, *next;
-
- /* Leave early if the dispatcher isn't in a happy state */
- if ((dispatcher->state != ADRENO_DISPATCHER_ACTIVE) ||
- adreno_gpu_fault(adreno_dev) != 0)
- return 0;
-
- /* Copy the current context list to a temporary list */
- spin_lock(&dispatcher->plist_lock);
- plist_move(&dispatcher->pending, &tmp);
- spin_unlock(&dispatcher->plist_lock);
-
- /* Try to fill the ringbuffer as much as possible */
- while (dispatcher->inflight < _dispatcher_inflight) {
-
- /* Stop doing things if the dispatcher is paused or faulted */
- if ((dispatcher->state != ADRENO_DISPATCHER_ACTIVE) ||
- adreno_gpu_fault(adreno_dev) != 0)
- break;
-
- if (plist_head_empty(&tmp))
- break;
-
- /* Get the next entry on the list */
- drawctxt = plist_first_entry(&tmp, struct adreno_context,
- pending);
-
- /* Remove it from the list */
- plist_del(&drawctxt->pending, &tmp);
-
- if (kgsl_context_detached(&drawctxt->base) ||
- drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- kgsl_context_put(&drawctxt->base);
- continue;
- }
-
- dispatcher_context_sendcmds(adreno_dev, drawctxt);
- kgsl_context_put(&drawctxt->base);
- }
-
- /* Requeue any remaining contexts for the next go around */
-
- spin_lock(&dispatcher->plist_lock);
-
- plist_for_each_entry_safe(drawctxt, next, &tmp, pending) {
- int prio = drawctxt->pending.prio;
-
- /* Reset the context node */
- plist_node_init(&drawctxt->pending, prio);
-
- /* And put it back in the master list */
- plist_add(&drawctxt->pending, &dispatcher->pending);
- }
-
- spin_unlock(&dispatcher->plist_lock);
-
- return 0;
-}
-
-/**
- * adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
- * @adreno_dev: Pointer to the adreno device struct
- *
- * Lock the dispatcher and call _adreno_dispatcher_issueibcmds
- */
-int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int ret;
-
- mutex_lock(&dispatcher->mutex);
- ret = _adreno_dispatcher_issuecmds(adreno_dev);
- mutex_unlock(&dispatcher->mutex);
-
- return ret;
-}
-
-static int _check_context_queue(struct adreno_context *drawctxt)
-{
- int ret;
-
- mutex_lock(&drawctxt->mutex);
-
- /*
- * Wake up if there is room in the context or if the whole thing got
- * invalidated while we were asleep
- */
-
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
- ret = 1;
- else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
-
- mutex_unlock(&drawctxt->mutex);
-
- return ret;
-}
-
-/**
- * get_timestamp() - Return the next timestamp for the context
- * @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
- * @timestamp - Pointer to a timestamp value possibly passed from the user
- *
- * Assign a timestamp based on the settings of the draw context and the command
- * batch.
- */
-static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
-{
- /* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
- *timestamp = 0;
- return 0;
- }
-
- if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
- /*
- * User specified timestamps need to be greater than the last
- * issued timestamp in the context
- */
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
- return -ERANGE;
-
- drawctxt->timestamp = *timestamp;
- } else
- drawctxt->timestamp++;
-
- *timestamp = drawctxt->timestamp;
- return 0;
-}
-
-/**
- * adreno_dispactcher_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
-{
- int ret;
-
- mutex_lock(&drawctxt->mutex);
-
- if (drawctxt->flags & CTXT_FLAGS_BEING_DESTROYED) {
- mutex_unlock(&drawctxt->mutex);
- return -EINVAL;
- }
-
- /*
- * After skipping to the end of the frame we need to force the preamble
- * to run (if it exists) regardless of the context state.
- */
-
- if (drawctxt->flags & CTXT_FLAGS_FORCE_PREAMBLE) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
- drawctxt->flags &= ~CTXT_FLAGS_FORCE_PREAMBLE;
- }
-
- /*
- * If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
- * through the pipeline but it won't actually send any commands
- */
-
- if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
-
- /*
- * If this command batch represents the EOF then clear the way
- * for the dispatcher to continue submitting
- */
-
- if (cmdbatch->flags & KGSL_CONTEXT_END_OF_FRAME) {
- drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
-
- /*
- * Force the preamble on the next command to ensure that
- * the state is correct
- */
-
- drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
- }
- }
-
- /* Wait for room in the context queue */
-
- while (drawctxt->queued >= _context_cmdqueue_size) {
- trace_adreno_drawctxt_sleep(drawctxt);
- mutex_unlock(&drawctxt->mutex);
-
- ret = wait_event_interruptible_timeout(drawctxt->wq,
- _check_context_queue(drawctxt),
- msecs_to_jiffies(_context_queue_wait));
-
- mutex_lock(&drawctxt->mutex);
- trace_adreno_drawctxt_wake(drawctxt);
-
- if (ret <= 0) {
- mutex_unlock(&drawctxt->mutex);
- return (ret == 0) ? -ETIMEDOUT : (int) ret;
- }
-
- /*
- * Account for the possiblity that the context got invalidated
- * while we were sleeping
- */
-
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- mutex_unlock(&drawctxt->mutex);
- return -EDEADLK;
- }
- }
-
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
- if (ret) {
- mutex_unlock(&drawctxt->mutex);
- return ret;
- }
-
- cmdbatch->timestamp = *timestamp;
-
- /*
- * Set the fault tolerance policy for the command batch - assuming the
- * context hsn't disabled FT use the current device policy
- */
-
- if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
- set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
- else
- cmdbatch->fault_policy = adreno_dev->ft_policy;
-
- /* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
-
- drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
-
-
- mutex_unlock(&drawctxt->mutex);
-
- /* Add the context to the dispatcher pending list */
- dispatcher_queue_context(adreno_dev, drawctxt);
-
- /*
- * Only issue commands if inflight is less than burst -this prevents us
- * from sitting around waiting for the mutex on a busy system - the work
- * loop will schedule it for us. Inflight is mutex protected but the
- * worse that can happen is that it will go to 0 after we check and if
- * it goes to 0 it is because the work loop decremented it and the work
- * queue will try to schedule new commands anyway.
- */
-
- if (adreno_dev->dispatcher.inflight < _context_cmdbatch_burst)
- adreno_dispatcher_issuecmds(adreno_dev);
-
- return 0;
-}
-
-/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
- * passed in then zero the size which effectively skips it when it is submitted
- * in the ringbuffer.
- */
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, unsigned int base)
-{
- int i;
-
- for (i = 0; i < cmdbatch->ibcount; i++) {
- if (cmdbatch->ibdesc[i].gpuaddr == base) {
- cmdbatch->ibdesc[i].sizedwords = 0;
- return;
- }
- }
-}
-
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
-{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- int skip = 1;
- int i;
-
- for (i = 0; i < count; i++) {
-
- /*
- * Only operate on command batches that belong to the
- * faulting context
- */
-
- if (replay[i]->context->id != cmdbatch->context->id)
- continue;
-
- /*
- * Skip all the command batches in this context until
- * the EOF flag is seen. If the EOF flag is seen then
- * force the preamble for the next command.
- */
-
- if (skip) {
- set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
-
- if (replay[i]->flags & KGSL_CONTEXT_END_OF_FRAME)
- skip = 0;
- } else {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
- return;
- }
- }
-
- /*
- * If the EOF flag hasn't been seen yet then set the flag in the
- * drawctxt to keep looking for it
- */
-
- if (skip && drawctxt)
- drawctxt->flags |= CTXT_FLAGS_SKIP_EOF;
-
- /*
- * If we did see the EOF flag then force the preamble on for the
- * next command issued on this context
- */
-
- if (!skip && drawctxt)
- drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
-}
-
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
- struct adreno_context *drawctxt;
-
- if (cmd == NULL)
- continue;
-
- drawctxt = ADRENO_CONTEXT(cmd->context);
-
- if (kgsl_context_detached(cmd->context) ||
- drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
- replay[i] = NULL;
-
- mutex_lock(&device->mutex);
- kgsl_cancel_events_timestamp(device, cmd->context,
- cmd->timestamp);
- mutex_unlock(&device->mutex);
-
- kgsl_cmdbatch_destroy(cmd);
- }
- }
-}
-
-static char _pidname[TASK_COMM_LEN];
-
-static inline const char *_kgsl_context_comm(struct kgsl_context *context)
-{
- struct task_struct *task = NULL;
-
- if (context)
- task = find_task_by_vpid(context->pid);
-
- if (task)
- get_task_comm(_pidname, task);
- else
- snprintf(_pidname, TASK_COMM_LEN, "unknown");
-
- return _pidname;
-}
-
-#define pr_fault(_d, _c, fmt, args...) \
- dev_err((_d)->dev, "%s[%d]: " fmt, \
- _kgsl_context_comm((_c)->context), \
- (_c)->context->pid, ##args)
-
-
-static void adreno_fault_header(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int status, base, rptr, wptr, ib1base, ib2base, ib1sz, ib2sz;
-
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
- &status);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_BASE),
- &base);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_RPTR),
- &rptr);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_WPTR),
- &wptr);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
- &ib1base);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
- &ib1sz);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
- &ib2base);
- kgsl_regread(device,
- adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
- &ib2sz);
-
- trace_adreno_gpu_fault(cmdbatch->context->id, cmdbatch->timestamp,
- status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
-
- pr_fault(device, cmdbatch,
- "gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %8.8x/%4.4x ib2 %8.8x/%4.4x\n",
- cmdbatch->context->id, cmdbatch->timestamp, status,
- rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
-}
-
-static int dispatcher_do_fault(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- unsigned int ptr;
- unsigned int reg, base;
- struct kgsl_cmdbatch **replay = NULL;
- struct kgsl_cmdbatch *cmdbatch;
- int ret, i, count = 0;
- int fault, first = 0;
- bool pagefault = false;
- BUG_ON(dispatcher->inflight == 0);
-
- fault = atomic_xchg(&dispatcher->fault, 0);
- if (fault == 0)
- return 0;
-
- /* Turn off all the timers */
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
-
- mutex_lock(&device->mutex);
-
- cmdbatch = dispatcher->cmdqueue[dispatcher->head];
-
- trace_adreno_cmdbatch_fault(cmdbatch, fault);
-
- /*
- * If the fault was due to a timeout then stop the CP to ensure we don't
- * get activity while we are trying to dump the state of the system
- */
-
- if (fault == ADRENO_TIMEOUT_FAULT) {
- adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
- reg |= (1 << 27) | (1 << 28);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
-
- /* Skip the PM dump for a timeout because it confuses people */
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
- }
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
-
- /*
- * Dump the postmortem and snapshot information if this is the first
- * detected fault for the oldest active command batch
- */
-
- if (!test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) {
- adreno_fault_header(device, cmdbatch);
-
- if (device->pm_dump_enable)
- kgsl_postmortem_dump(device, 0);
-
- kgsl_device_snapshot(device, 1);
- }
-
- mutex_unlock(&device->mutex);
-
- /* Allocate memory to store the inflight commands */
- replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
-
- if (replay == NULL) {
- unsigned int ptr = dispatcher->head;
-
- while (ptr != dispatcher->tail) {
- struct kgsl_context *context =
- dispatcher->cmdqueue[ptr]->context;
-
- adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatcher->cmdqueue[ptr]);
-
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
- }
-
- /*
- * Set the replay count to zero - this will ensure that the
- * hardware gets reset but nothing else goes played
- */
-
- count = 0;
- goto replay;
- }
-
- /* Copy the inflight command batches into the temporary storage */
- ptr = dispatcher->head;
-
- while (ptr != dispatcher->tail) {
- replay[count++] = dispatcher->cmdqueue[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
- }
-
- /*
- * For the purposes of replay, we assume that the oldest command batch
- * that hasn't retired a timestamp is "hung".
- */
-
- cmdbatch = replay[0];
-
- /*
- * If FT is disabled for this cmdbatch invalidate immediately
- */
-
- if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
- test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
- pr_fault(device, cmdbatch, "gpu skipped ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
-
- adreno_drawctxt_invalidate(device, cmdbatch->context);
- }
-
- /*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
- * again on replay
- */
-
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
-
- /*
- * A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
- * Clear the replay bit and move on to the next policy level
- */
-
- if (fault == ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
-
- /*
- * A timeout fault means the IB timed out - clear the policy and
- * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
- */
-
- if (fault == ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
-
- /*
- * If the context had a GPU page fault then it is likely it would fault
- * again if replayed
- */
-
- if (test_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv)) {
- /* we'll need to resume the mmu later... */
- pagefault = true;
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
- clear_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv);
- }
-
- /*
- * Execute the fault tolerance policy. Each command batch stores the
- * current fault policy that was set when it was queued.
- * As the options are tried in descending priority
- * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
- * change comes around again
- */
-
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
- goto replay;
- }
-
- /*
- * Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
- * because the CP may have caused a page-fault while it was prefetching
- * the next IB1/IB2. Walk all outstanding commands and zap the
- * supposedly bad IB1 wherever it lurks.
- */
-
- if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
- set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
-
- for (i = 0; i < count; i++) {
- if (replay[i] != NULL)
- cmdbatch_skip_ib(replay[i], base);
- }
-
- goto replay;
- }
-
- if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch,
- BIT(KGSL_FT_SKIPFRAME));
- set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
-
- /*
- * Skip all the pending command batches for this context until
- * the EOF frame is seen
- */
- cmdbatch_skip_frame(cmdbatch, replay, count);
- goto replay;
- }
-
- /* If we get here then all the policies failed */
-
- pr_fault(device, cmdbatch, "gpu failed ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
-
- /* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
-
-
-replay:
- /* Reset the dispatcher queue */
- dispatcher->inflight = 0;
- dispatcher->head = dispatcher->tail = 0;
-
- /* Reset the GPU */
- mutex_lock(&device->mutex);
-
- /* resume the MMU if it is stalled */
- if (pagefault && device->mmu.mmu_ops->mmu_pagefault_resume != NULL)
- device->mmu.mmu_ops->mmu_pagefault_resume(&device->mmu);
-
- ret = adreno_reset(device);
- mutex_unlock(&device->mutex);
-
- /* If adreno_reset() fails then what hope do we have for the future? */
- BUG_ON(ret);
-
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
-
- /* Replay the pending command buffers */
- for (i = 0; i < count; i++) {
-
- int ret;
-
- if (replay[i] == NULL)
- continue;
-
- /*
- * Force the preamble on the first command (if applicable) to
- * avoid any strange stage issues
- */
-
- if (first == 0) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
- first = 1;
- }
-
- /*
- * Force each command batch to wait for idle - this avoids weird
- * CP parse issues
- */
-
- set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
-
- ret = sendcmd(adreno_dev, replay[i]);
-
- /*
- * If sending the command fails, then try to recover by
- * invalidating the context
- */
-
- if (ret) {
- pr_fault(device, replay[i],
- "gpu reset failed ctx %d ts %d\n",
- replay[i]->context->id, replay[i]->timestamp);
-
- adreno_drawctxt_invalidate(device, replay[i]->context);
- remove_invalidated_cmdbatches(device, &replay[i],
- count - i);
- }
- }
-
- mutex_lock(&device->mutex);
- kgsl_active_count_put(device);
- mutex_unlock(&device->mutex);
-
- kfree(replay);
-
- return 1;
-}
-
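The function above walks the fault policy in descending priority (REPLAY -> SKIPIB -> SKIPFRAME -> invalidate), consuming one policy bit per fault. A minimal sketch of that descent - ft_next_action() is illustrative, not a driver function, and the real code also records the choice in fault_recovery and emits tracepoints:

    static int ft_next_action(unsigned long *policy)
    {
            /* highest-priority option still allowed wins; consume its bit */
            if (test_and_clear_bit(KGSL_FT_REPLAY, policy))
                    return KGSL_FT_REPLAY;
            if (test_and_clear_bit(KGSL_FT_SKIPIB, policy))
                    return KGSL_FT_SKIPIB;
            if (test_and_clear_bit(KGSL_FT_SKIPFRAME, policy))
                    return KGSL_FT_SKIPFRAME;
            return -1;      /* nothing left - the caller invalidates */
    }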
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
- unsigned int consumed, unsigned int retired)
-{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
-}
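cmdbatch_consumed() is true while a batch sits between its consumed and retired timestamps, which only works if timestamp_cmp() is wraparound-safe. One common scheme (a sketch, not necessarily kgsl's exact window logic) is the jiffies-style signed difference:

    /* > 0 if a is after b, < 0 if before, 0 if equal; survives 32-bit
     * wrap as long as the two stamps are within 2^31 of each other */
    static inline int ts_cmp_sketch(unsigned int a, unsigned int b)
    {
            return (int)(a - b);
    }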
-
-static void _print_recovery(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
-{
- static struct {
- unsigned int mask;
- const char *str;
- } flags[] = { ADRENO_FT_TYPES };
-
- int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
- char *result = "unknown";
-
- for (i = 0; i < ARRAY_SIZE(flags); i++) {
- if (flags[i].mask == BIT(nr)) {
- result = (char *) flags[i].str;
- break;
- }
- }
-
- pr_fault(device, cmdbatch,
- "gpu %s ctx %d ts %d policy %lX\n",
- result, cmdbatch->context->id, cmdbatch->timestamp,
- cmdbatch->fault_recovery);
-}
-
-/**
- * adreno_dispatcher_work() - Master work handler for the dispatcher
- * @work: Pointer to the work struct for the current work queue
- *
- * Process expired commands and send new ones.
- */
-static void adreno_dispatcher_work(struct work_struct *work)
-{
- struct adreno_dispatcher *dispatcher =
- container_of(work, struct adreno_dispatcher, work);
- struct adreno_device *adreno_dev =
- container_of(dispatcher, struct adreno_device, dispatcher);
- struct kgsl_device *device = &adreno_dev->dev;
- int count = 0;
-
- mutex_lock(&dispatcher->mutex);
-
- while (dispatcher->head != dispatcher->tail) {
- uint32_t consumed, retired = 0;
- struct kgsl_cmdbatch *cmdbatch =
- dispatcher->cmdqueue[dispatcher->head];
- struct adreno_context *drawctxt;
- BUG_ON(cmdbatch == NULL);
-
- drawctxt = ADRENO_CONTEXT(cmdbatch->context);
-
- /*
- * First try to expire the timestamp. This happens if the
- * context is valid and the timestamp expired normally or if the
- * context was destroyed before the command batch was finished
- * on the GPU. Either way, retire the command batch, advance the
- * pointers and continue processing the queue
- */
-
- if (!kgsl_context_detached(cmdbatch->context))
- retired = kgsl_readtimestamp(device, cmdbatch->context,
- KGSL_TIMESTAMP_RETIRED);
-
- if (kgsl_context_detached(cmdbatch->context) ||
- (timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
-
- /*
- * If the cmdbatch in question had faulted announce its
- * successful completion to the world
- */
-
- if (cmdbatch->fault_recovery != 0)
- _print_recovery(device, cmdbatch);
-
- trace_adreno_cmdbatch_retired(cmdbatch,
- dispatcher->inflight - 1);
-
- /* Reduce the number of inflight command batches */
- dispatcher->inflight--;
-
- /* Zero the old entry */
- dispatcher->cmdqueue[dispatcher->head] = NULL;
-
- /* Advance the buffer head */
- dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
-
- /* Destroy the retired command batch */
- kgsl_cmdbatch_destroy(cmdbatch);
-
- /* Update the expire time for the next command batch */
-
- if (dispatcher->inflight > 0) {
- cmdbatch =
- dispatcher->cmdqueue[dispatcher->head];
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(_cmdbatch_timeout);
- }
-
- count++;
- continue;
- }
-
- /*
- * If we got a fault from the interrupt handler, this command
- * is to blame. Invalidate it, reset and replay
- */
-
- if (dispatcher_do_fault(device))
- goto done;
-
- /* Get the last consumed timestamp */
- consumed = kgsl_readtimestamp(device, cmdbatch->context,
- KGSL_TIMESTAMP_CONSUMED);
-
- /*
- * Break here if fault detection is disabled for the context or
- * if long-running IB detection is disabled device-wide.
- * Long running command buffers will be allowed to run to
- * completion - but badly behaving command buffers (infinite
- * shaders etc) can end up running forever.
- */
-
- if (!adreno_dev->long_ib_detect ||
- drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
- break;
-
- /*
- * The last line of defense is to check if the command batch has
- * timed out. If we get this far but the timeout hasn't expired
- * yet then the GPU is still ticking away
- */
-
- if (time_is_after_jiffies(cmdbatch->expires))
- break;
-
- /* Boom goes the dynamite */
-
- pr_fault(device, cmdbatch,
- "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
-
- adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
-
- dispatcher_do_fault(device);
- break;
- }
-
- /*
- * Decrement the active count to 0 - this will allow the system to go
- * into suspend even if there are queued command batches
- */
-
- if (count && dispatcher->inflight == 0) {
- mutex_lock(&device->mutex);
- kgsl_active_count_put(device);
- mutex_unlock(&device->mutex);
- }
-
- /* Dispatch new commands if we have the room */
- if (dispatcher->inflight < _dispatcher_inflight)
- _adreno_dispatcher_issuecmds(adreno_dev);
-
-done:
- /* Either update the timer for the next command batch or disable it */
- if (dispatcher->inflight) {
- struct kgsl_cmdbatch *cmdbatch
- = dispatcher->cmdqueue[dispatcher->head];
-
- /* Update the timeout timer for the next command batch */
- mod_timer(&dispatcher->timer, cmdbatch->expires);
- } else {
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
- }
-
- /* Before leaving update the pwrscale information */
- mutex_lock(&device->mutex);
- kgsl_pwrscale_idle(device);
- mutex_unlock(&device->mutex);
-
- mutex_unlock(&dispatcher->mutex);
-}
-
-void adreno_dispatcher_schedule(struct kgsl_device *device)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- queue_work(device->work_queue, &dispatcher->work);
-}
-
-/**
- * adreno_dispatcher_queue_context() - schedule a drawctxt in the dispatcher
- * @device: pointer to the KGSL device
- * @drawctxt: pointer to the drawctxt to schedule
- *
- * Put a draw context on the dispatcher pending queue and schedule the
- * dispatcher. This is used to reschedule a context that might have been
- * blocked waiting on sync points or other concerns
- */
-void adreno_dispatcher_queue_context(struct kgsl_device *device,
- struct adreno_context *drawctxt)
-{
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- dispatcher_queue_context(adreno_dev, drawctxt);
- adreno_dispatcher_schedule(device);
-}
-
-/*
- * This is called on a regular basis while command batches are inflight. Fault
- * detection registers are read and compared to the existing values - if they
- * changed then the GPU is still running. If they are the same between
- * subsequent calls then the GPU may have faulted
- */
-
-void adreno_dispatcher_fault_timer(unsigned long data)
-{
- struct adreno_device *adreno_dev = (struct adreno_device *) data;
- struct kgsl_device *device = &adreno_dev->dev;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- /* Leave if the user decided to turn off fast hang detection */
- if (adreno_dev->fast_hang_detect == 0)
- return;
-
- if (adreno_gpu_fault(adreno_dev)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
- /*
- * Read the fault registers - if the compare returns 0 then they haven't
- * changed, so mark the dispatcher as faulted and schedule the work loop.
- */
-
- if (!fault_detect_read_compare(device)) {
- adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
- adreno_dispatcher_schedule(device);
- } else {
- mod_timer(&dispatcher->fault_timer,
- jiffies + msecs_to_jiffies(_fault_timer_interval));
- }
-}
-
-/*
- * This is called when the timer expires - it either means the GPU is hung or
- * the IB is taking too long to execute
- */
-void adreno_dispatcher_timer(unsigned long data)
-{
- struct adreno_device *adreno_dev = (struct adreno_device *) data;
- struct kgsl_device *device = &adreno_dev->dev;
-
- adreno_dispatcher_schedule(device);
-}
-/**
- * adreno_dispatcher_irq_fault() - Trigger a fault in the dispatcher
- * @device: Pointer to the KGSL device
- *
- * Called from an interrupt context this will trigger a fault in the
- * dispatcher for the oldest pending command batch
- */
-void adreno_dispatcher_irq_fault(struct kgsl_device *device)
-{
- adreno_set_gpu_fault(ADRENO_DEVICE(device), ADRENO_HARD_FAULT);
- adreno_dispatcher_schedule(device);
-}
-
-/**
- * adreno_dispatcher_pause() - stop the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Pause the dispatcher so it doesn't accept any new commands
- */
-void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- /*
- * This will probably get called while holding other mutexes so don't
- * take the dispatcher mutex. The biggest penalty is that another
- * command might be submitted while we are in here but that's okay
- * because whoever is waiting for the drain will just have another
- * command batch to wait for
- */
-
- dispatcher->state = ADRENO_DISPATCHER_PAUSE;
-}
-
-/**
- * adreno_dispatcher_start() - activate the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Set the dispatcher active and start the loop once to get things going
- */
-void adreno_dispatcher_start(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
-
- /* Schedule the work loop to get things going */
- adreno_dispatcher_schedule(&adreno_dev->dev);
-}
-
-/**
- * adreno_dispatcher_stop() - stop the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Stop the dispatcher and close all the timers
- */
-void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
-
- dispatcher->state = ADRENO_DISPATCHER_PAUSE;
-}
-
-/**
- * adreno_dispatcher_close() - close the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Close the dispatcher and free all the outstanding commands and memory
- */
-void adreno_dispatcher_close(struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- mutex_lock(&dispatcher->mutex);
- del_timer_sync(&dispatcher->timer);
- del_timer_sync(&dispatcher->fault_timer);
-
- while (dispatcher->head != dispatcher->tail) {
- kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
- dispatcher->head = (dispatcher->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
- }
-
- mutex_unlock(&dispatcher->mutex);
-
- kobject_put(&dispatcher->kobj);
-}
-
-struct dispatcher_attribute {
- struct attribute attr;
- ssize_t (*show)(struct adreno_dispatcher *,
- struct dispatcher_attribute *, char *);
- ssize_t (*store)(struct adreno_dispatcher *,
- struct dispatcher_attribute *, const char *buf,
- size_t count);
- unsigned int max;
- unsigned int *value;
-};
-
-#define DISPATCHER_UINT_ATTR(_name, _mode, _max, _value) \
- struct dispatcher_attribute dispatcher_attr_##_name = { \
- .attr = { .name = __stringify(_name), .mode = _mode }, \
- .show = _show_uint, \
- .store = _store_uint, \
- .max = _max, \
- .value = &(_value), \
- }
-
-#define to_dispatcher_attr(_a) \
- container_of((_a), struct dispatcher_attribute, attr)
-#define to_dispatcher(k) container_of(k, struct adreno_dispatcher, kobj)
-
-static ssize_t _store_uint(struct adreno_dispatcher *dispatcher,
- struct dispatcher_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- int ret = kstrtoul(buf, 0, &val);
-
- if (ret)
- return ret;
-
- if (!val || (attr->max && (val > attr->max)))
- return -EINVAL;
-
- *((unsigned int *) attr->value) = val;
- return size;
-}
-
-static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
- struct dispatcher_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- *((unsigned int *) attr->value));
-}
-
-static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
- _dispatcher_inflight);
-/*
- * Our code that "puts back" a command from the context is much cleaner
- * if we are sure that there will always be enough room in the
- * ringbuffer so restrict the maximum size of the context queue to
- * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
- */
-static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
- ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
-static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
- _context_cmdbatch_burst);
-static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, _cmdbatch_timeout);
-static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
-static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
- _fault_timer_interval);
-
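For reference, a use site like "static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_inflight);" expands to roughly:

    static struct dispatcher_attribute dispatcher_attr_inflight = {
            .attr = { .name = "inflight", .mode = 0644 },
            .show = _show_uint,
            .store = _store_uint,
            .max = ADRENO_DISPATCH_CMDQUEUE_SIZE,
            .value = &(_dispatcher_inflight),
    };

Each attribute then shows up as a file under the "dispatch" kobject, with writes bounds-checked against .max by _store_uint().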
-static struct attribute *dispatcher_attrs[] = {
- &dispatcher_attr_inflight.attr,
- &dispatcher_attr_context_cmdqueue_size.attr,
- &dispatcher_attr_context_burst_count.attr,
- &dispatcher_attr_cmdbatch_timeout.attr,
- &dispatcher_attr_context_queue_wait.attr,
- &dispatcher_attr_fault_detect_interval.attr,
- NULL,
-};
-
-static ssize_t dispatcher_sysfs_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
- struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
- ssize_t ret = -EIO;
-
- if (pattr->show)
- ret = pattr->show(dispatcher, pattr, buf);
-
- return ret;
-}
-
-static ssize_t dispatcher_sysfs_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t count)
-{
- struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
- struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
- ssize_t ret = -EIO;
-
- if (pattr->store)
- ret = pattr->store(dispatcher, pattr, buf, count);
-
- return ret;
-}
-
-static void dispatcher_sysfs_release(struct kobject *kobj)
-{
-}
-
-static const struct sysfs_ops dispatcher_sysfs_ops = {
- .show = dispatcher_sysfs_show,
- .store = dispatcher_sysfs_store
-};
-
-static struct kobj_type ktype_dispatcher = {
- .sysfs_ops = &dispatcher_sysfs_ops,
- .default_attrs = dispatcher_attrs,
- .release = dispatcher_sysfs_release
-};
-
-/**
- * adreno_dispatcher_init() - Initialize the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Initialize the dispatcher
- */
-int adreno_dispatcher_init(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- int ret;
-
- memset(dispatcher, 0, sizeof(*dispatcher));
-
- mutex_init(&dispatcher->mutex);
-
- setup_timer(&dispatcher->timer, adreno_dispatcher_timer,
- (unsigned long) adreno_dev);
-
- setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
- (unsigned long) adreno_dev);
-
- INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
-
- plist_head_init(&dispatcher->pending);
- spin_lock_init(&dispatcher->plist_lock);
-
- dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
-
- ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
- &device->dev->kobj, "dispatch");
-
- return ret;
-}
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 907d41f..0af4c12e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -13,12 +13,10 @@
#include <linux/slab.h>
#include <linux/msm_kgsl.h>
-#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
-#include "adreno_trace.h"
#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
@@ -134,247 +132,6 @@
*incmd = cmd;
}
-static void wait_callback(struct kgsl_device *device, void *priv, u32 id,
- u32 timestamp, u32 type)
-{
- struct adreno_context *drawctxt = priv;
- wake_up_interruptible_all(&drawctxt->waiting);
-}
-
-#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io) \
-({ \
- long __ret = timeout; \
- if (io) \
- __wait_io_event_interruptible_timeout(wq, condition, __ret); \
- else \
- __wait_event_interruptible_timeout(wq, condition, __ret); \
- __ret; \
-})
-
-#define adreno_wait_event_interruptible(wq, condition, io) \
-({ \
- long __ret; \
- if (io) \
- __wait_io_event_interruptible(wq, condition, __ret); \
- else \
- __wait_event_interruptible(wq, condition, __ret); \
- __ret; \
-})
-
-static int _check_context_timestamp(struct kgsl_device *device,
- struct adreno_context *drawctxt, unsigned int timestamp)
-{
- int ret = 0;
-
- /* Bail if the drawctxt has been invalidated or destroyed */
- if (kgsl_context_detached(&drawctxt->base) ||
- drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
- return 1;
-
- mutex_lock(&device->mutex);
- ret = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
- mutex_unlock(&device->mutex);
-
- return ret;
-}
-
-/**
- * adreno_drawctxt_wait() - sleep until a timestamp expires
- * @adreno_dev: pointer to the adreno_device struct
- * @context: Pointer to the KGSL context to sleep for
- * @timestamp: Timestamp to wait on
- * @timeout: Number of jiffies to wait (0 for infinite)
- *
- * Register an event to wait for a timestamp on a context and sleep until it
- * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
- * on success
- */
-int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
- struct kgsl_context *context,
- uint32_t timestamp, unsigned int timeout)
-{
- static unsigned int io_cnt;
- struct kgsl_device *device = &adreno_dev->dev;
- struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- int ret, io;
-
- if (kgsl_context_detached(context))
- return -EINVAL;
-
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
- return -EDEADLK;
-
- /* Needs to hold the device mutex */
- BUG_ON(!mutex_is_locked(&device->mutex));
-
- trace_adreno_drawctxt_wait_start(context->id, timestamp);
-
- ret = kgsl_add_event(device, context->id, timestamp,
- wait_callback, drawctxt, NULL);
- if (ret)
- goto done;
-
- /*
- * For proper power accounting sometimes we need to call
- * io_wait_interruptible_timeout and sometimes we need to call
- * plain old wait_interruptible_timeout. We call the regular
- * timeout N times out of 100, where N is a number specified by
- * the current power level
- */
-
- io_cnt = (io_cnt + 1) % 100;
- io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
- ? 0 : 1;
-
- mutex_unlock(&device->mutex);
-
- if (timeout) {
- ret = (int) adreno_wait_event_interruptible_timeout(
- drawctxt->waiting,
- _check_context_timestamp(device, drawctxt, timestamp),
- msecs_to_jiffies(timeout), io);
-
- if (ret == 0)
- ret = -ETIMEDOUT;
- else if (ret > 0)
- ret = 0;
- } else {
- ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
- _check_context_timestamp(device, drawctxt, timestamp),
- io);
- }
-
- mutex_lock(&device->mutex);
-
- /* -EDEADLK if the context was invalidated while we were waiting */
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
- ret = -EDEADLK;
-
-
- /* Return -EINVAL if the context was detached while we were waiting */
- if (kgsl_context_detached(context))
- ret = -EINVAL;
-
-done:
- trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
- return ret;
-}
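To make the io-fraction selection above concrete (the 33 below is a made-up pwrlevel value): with io_fraction == 33, io_cnt values 0..32 pick the plain interruptible wait and 33..99 the I/O-accounted variant, so 33 of every 100 sleeps are charged as ordinary waits and the other 67 as I/O wait. As a standalone sketch:

    static int pick_wait_type(unsigned int *io_cnt, unsigned int io_fraction)
    {
            *io_cnt = (*io_cnt + 1) % 100;
            return (*io_cnt < io_fraction) ? 0 : 1;  /* 0 = plain, 1 = I/O */
    }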
-
-static void global_wait_callback(struct kgsl_device *device, void *priv, u32 id,
- u32 timestamp, u32 type)
-{
- struct adreno_context *drawctxt = priv;
-
- wake_up_interruptible_all(&drawctxt->waiting);
- kgsl_context_put(&drawctxt->base);
-}
-
-static int _check_global_timestamp(struct kgsl_device *device,
- unsigned int timestamp)
-{
- int ret;
-
- mutex_lock(&device->mutex);
- ret = kgsl_check_timestamp(device, NULL, timestamp);
- mutex_unlock(&device->mutex);
-
- return ret;
-}
-
-int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
- struct kgsl_context *context,
- uint32_t timestamp, unsigned int timeout)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- int ret;
-
- /* Needs to hold the device mutex */
- BUG_ON(!mutex_is_locked(&device->mutex));
-
- _kgsl_context_get(context);
-
- trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);
-
- ret = kgsl_add_event(device, KGSL_MEMSTORE_GLOBAL, timestamp,
- global_wait_callback, drawctxt, NULL);
- if (ret) {
- kgsl_context_put(context);
- goto done;
- }
-
- mutex_unlock(&device->mutex);
-
- if (timeout) {
- ret = (int) wait_event_interruptible_timeout(drawctxt->waiting,
- _check_global_timestamp(device, timestamp),
- msecs_to_jiffies(timeout));
-
- if (ret == 0)
- ret = -ETIMEDOUT;
- else if (ret > 0)
- ret = 0;
- } else {
- ret = (int) wait_event_interruptible(drawctxt->waiting,
- _check_global_timestamp(device, timestamp));
- }
-
- mutex_lock(&device->mutex);
-
- if (ret)
- kgsl_cancel_events_timestamp(device, NULL, timestamp);
-
-done:
- trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
- return ret;
-}
-
-/**
- * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
- * @device: Pointer to the KGSL device structure for the GPU
- * @context: Pointer to the KGSL context structure
- *
- * Invalidate the context and remove all queued commands and cancel any pending
- * waiters
- */
-void adreno_drawctxt_invalidate(struct kgsl_device *device,
- struct kgsl_context *context)
-{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
-
- trace_adreno_drawctxt_invalidate(drawctxt);
-
- drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
-
- /* Clear the pending queue */
- mutex_lock(&drawctxt->mutex);
-
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
-
- mutex_unlock(&drawctxt->mutex);
-
- mutex_lock(&device->mutex);
- kgsl_cancel_events_timestamp(device, context,
- cmdbatch->timestamp);
- mutex_unlock(&device->mutex);
-
- kgsl_cmdbatch_destroy(cmdbatch);
- mutex_lock(&drawctxt->mutex);
- }
-
- mutex_unlock(&drawctxt->mutex);
-
- /* Give the bad news to everybody waiting around */
- wake_up_interruptible_all(&drawctxt->waiting);
- wake_up_interruptible_all(&drawctxt->wq);
-}
-
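The drain loop above deliberately drops drawctxt->mutex around each kgsl_cmdbatch_destroy(), since destroy can take the device mutex. Reduced to its shape, with hypothetical queue helpers:

    mutex_lock(&q->lock);
    while (!queue_empty(q)) {
            item = queue_pop(q);        /* advance the head under the lock */
            mutex_unlock(&q->lock);     /* release before the heavy work */
            destroy(item);              /* may sleep or take other locks */
            mutex_lock(&q->lock);
    }
    mutex_unlock(&q->lock);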
/**
* adreno_drawctxt_create - create a new adreno draw context
* @dev_priv: the owner of the context
@@ -392,7 +149,6 @@
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
-
if (drawctxt == NULL)
return ERR_PTR(-ENOMEM);
@@ -412,30 +168,22 @@
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
KGSL_CONTEXT_TYPE_MASK);
- /* Always enable per-context timestamps */
- *flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
- drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
-
if (*flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (*flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
- if (*flags & KGSL_CONTEXT_USER_GENERATED_TS)
+ if (*flags & KGSL_CONTEXT_PER_CONTEXT_TS)
+ drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
+
+ if (*flags & KGSL_CONTEXT_USER_GENERATED_TS) {
+ if (!(*flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
+ ret = -EINVAL;
+ goto err;
+ }
drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
-
- mutex_init(&drawctxt->mutex);
- init_waitqueue_head(&drawctxt->wq);
- init_waitqueue_head(&drawctxt->waiting);
-
- /*
- * Set up the plist node for the dispatcher. For now all contexts have
- * the same priority, but later the priority will be set at create time
- * by the user
- */
-
- plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);
+ }
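So a caller that wants user-generated timestamps must now also ask for per-context timestamps; a hypothetical flag setup (illustrative, not driver code):

    /* accepted: user-generated timestamps ride on per-context timestamps */
    flags = KGSL_CONTEXT_PER_CONTEXT_TS | KGSL_CONTEXT_USER_GENERATED_TS;

    /* rejected with -EINVAL by the check above */
    flags = KGSL_CONTEXT_USER_GENERATED_TS;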
if (*flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
@@ -448,6 +196,12 @@
goto err;
kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ref_wait_ts),
+ KGSL_INIT_REFTIMESTAMP);
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->base.id, ts_cmp_enable),
+ 0);
+ kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
kgsl_sharedmem_writel(device, &device->memstore,
@@ -461,39 +215,22 @@
}
/**
- * adreno_drawctxt_sched() - Schedule a previously blocked context
- * @device: pointer to a KGSL device
- * @drawctxt: drawctxt to reschedule
- *
- * This function is called by the core when it knows that a previously blocked
- * context has been unblocked. The default adreno response is to reschedule the
- * context on the dispatcher
- */
-void adreno_drawctxt_sched(struct kgsl_device *device,
- struct kgsl_context *context)
-{
- adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
-}
-
-/**
* adreno_drawctxt_detach(): detach a context from the GPU
* @context: Generic KGSL context container for the context
*
*/
-int adreno_drawctxt_detach(struct kgsl_context *context)
+void adreno_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
struct adreno_device *adreno_dev;
struct adreno_context *drawctxt;
- int ret;
if (context == NULL)
- return 0;
+ return;
device = context->device;
adreno_dev = ADRENO_DEVICE(device);
drawctxt = ADRENO_CONTEXT(context);
-
/* deactivate context */
if (adreno_dev->drawctxt_active == drawctxt) {
/* no need to save GMEM or shader, the context is
@@ -509,39 +246,13 @@
adreno_drawctxt_switch(adreno_dev, NULL, 0);
}
- mutex_lock(&drawctxt->mutex);
-
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
-
- mutex_unlock(&drawctxt->mutex);
-
- /*
- * Don't hold the drawctxt mutex while the cmdbatch is being
- * destroyed because the cmdbatch destroy takes the device
- * mutex and the world falls in on itself
- */
-
- kgsl_cmdbatch_destroy(cmdbatch);
- mutex_lock(&drawctxt->mutex);
- }
-
- mutex_unlock(&drawctxt->mutex);
-
- /* Wait for the last global timestamp to pass before continuing */
- ret = adreno_drawctxt_wait_global(adreno_dev, context,
- drawctxt->internal_timestamp, 10 * 1000);
+ if (device->state != KGSL_STATE_HUNG)
+ adreno_idle(device);
adreno_profile_process_results(device);
kgsl_sharedmem_free(&drawctxt->gpustate);
kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
-
- return ret;
}
@@ -585,12 +296,11 @@
* Switch the current draw context
*/
-int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags)
{
struct kgsl_device *device = &adreno_dev->dev;
- int ret = 0;
if (drawctxt) {
if (flags & KGSL_CONTEXT_SAVE_GMEM)
@@ -606,24 +316,18 @@
if (adreno_dev->drawctxt_active == drawctxt) {
if (adreno_dev->gpudev->ctxt_draw_workaround &&
adreno_is_a225(adreno_dev))
- ret = adreno_dev->gpudev->ctxt_draw_workaround(
+ adreno_dev->gpudev->ctxt_draw_workaround(
adreno_dev, drawctxt);
- return ret;
+ return;
}
- trace_adreno_drawctxt_switch(adreno_dev->drawctxt_active,
- drawctxt, flags);
+ KGSL_CTXT_INFO(device, "from %d to %d flags %d\n",
+ adreno_dev->drawctxt_active ?
+ adreno_dev->drawctxt_active->base.id : 0,
+ drawctxt ? drawctxt->base.id : 0, flags);
/* Save the old context */
- ret = adreno_dev->gpudev->ctxt_save(adreno_dev,
- adreno_dev->drawctxt_active);
-
- if (ret) {
- KGSL_DRV_ERR(device,
- "Error in GPU context %d save: %d\n",
- adreno_dev->drawctxt_active->base.id, ret);
- return ret;
- }
+ adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
/* Put the old instance of the active drawctxt */
if (adreno_dev->drawctxt_active) {
@@ -636,14 +340,6 @@
_kgsl_context_get(&drawctxt->base);
/* Set the new context */
- ret = adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
- if (ret) {
- KGSL_DRV_ERR(device,
- "Error in GPU context %d restore: %d\n",
- drawctxt->base.id, ret);
- return ret;
- }
-
+ adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
adreno_dev->drawctxt_active = drawctxt;
- return 0;
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 5c12676..3088099 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -54,8 +54,6 @@
#define CTXT_FLAGS_SKIP_EOF BIT(15)
/* Context no fault tolerance */
#define CTXT_FLAGS_NO_FAULT_TOLERANCE BIT(16)
-/* Force the preamble for the next submission */
-#define CTXT_FLAGS_FORCE_PREAMBLE BIT(17)
/* Symbolic table for the adreno draw context type */
#define ADRENO_DRAWCTXT_TYPES \
@@ -71,13 +69,6 @@
const char *str;
};
-#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
-
-#define ADRENO_CONTEXT_DEFAULT_PRIORITY 1
-
-#define ADRENO_CONTEXT_STATE_ACTIVE 0
-#define ADRENO_CONTEXT_STATE_INVALID 1
-
struct kgsl_device;
struct adreno_device;
struct kgsl_device_private;
@@ -108,58 +99,18 @@
struct kgsl_memdesc quad_vertices_restore;
};
-/**
- * struct adreno_context - Adreno GPU draw context
- * @id: Unique integer ID of the context
- * @timestamp: Last issued context-specific timestamp
- * @internal_timestamp: Global timestamp of the last issued command
- * @state: Current state of the context
- * @flags: Bitfield controlling behavior of the context
- * @type: Context type (GL, CL, RS)
- * @mutex: Mutex to protect the cmdqueue
- * @pagetable: Pointer to the GPU pagetable for the context
- * @gpustate: Pointer to the GPU scratch memory for context save/restore
- * @reg_restore: Command buffer for restoring context registers
- * @shader_save: Command buffer for saving shaders
- * @shader_restore: Command buffer to restore shaders
- * @context_gmem_shadow: GMEM shadow structure for save/restore
- * @reg_save: A2XX command buffer to save context registers
- * @shader_fixup: A2XX command buffer to "fix" shaders on restore
- * @chicken_restore: A2XX command buffer to "fix" register restore
- * @bin_base_offset: Saved value of the A2XX BIN_BASE_OFFSET register
- * @regconstant_save: A3XX command buffer to save some registers
- * @constant_restore: A3XX command buffer to restore some registers
- * @hslqcontrol_restore: A3XX command buffer to restore HSLSQ registers
- * @save_fixup: A3XX command buffer to "fix" register save
- * @restore_fixup: A3XX command buffer to restore register save fixes
- * @shader_load_commands: A3XX GPU memory descriptor for shader load IB
- * @shader_save_commands: A3XX GPU memory descriptor for shader save IB
- * @constantr_save_commands: A3XX GPU memory descriptor for constant save IB
- * @constant_load_commands: A3XX GPU memory descriptor for constant load IB
- * @cond_execs: A3XX GPU memory descriptor for conditional exec IB
- * @hlsq_restore_commands: A3XX GPU memory descriptor for HLSQ restore IB
- * @cmdqueue: Queue of command batches waiting to be dispatched for this context
- * @cmdqueue_head: Head of the cmdqueue queue
- * @cmdqueue_tail: Tail of the cmdqueue queue
- * @pending: Priority list node for the dispatcher list of pending contexts
- * @wq: Wait queue for contexts sleeping until there is room in the queue
- * @waiting: Wait queue for contexts waiting for a timestamp or event
- * @queued: Number of commands queued in the cmdqueue
- */
struct adreno_context {
struct kgsl_context base;
unsigned int ib_gpu_time_used;
unsigned int timestamp;
- unsigned int internal_timestamp;
- int state;
uint32_t flags;
unsigned int type;
- struct mutex mutex;
struct kgsl_memdesc gpustate;
unsigned int reg_restore[3];
unsigned int shader_save[3];
unsigned int shader_restore[3];
+ /* Information about the GMEM shadow created at context create time */
struct gmem_shadow_t context_gmem_shadow;
/* A2XX specific items */
@@ -180,44 +131,23 @@
struct kgsl_memdesc constant_load_commands[3];
struct kgsl_memdesc cond_execs[4];
struct kgsl_memdesc hlsqcontrol_restore_commands[1];
-
- /* Dispatcher */
- struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
- int cmdqueue_head;
- int cmdqueue_tail;
-
- struct plist_node pending;
- wait_queue_head_t wq;
- wait_queue_head_t waiting;
-
- int queued;
};
struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
uint32_t *flags);
-int adreno_drawctxt_detach(struct kgsl_context *context);
+void adreno_drawctxt_detach(struct kgsl_context *context);
void adreno_drawctxt_destroy(struct kgsl_context *context);
-void adreno_drawctxt_sched(struct kgsl_device *device,
- struct kgsl_context *context);
-
-int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags);
void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset);
-int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
- struct kgsl_context *context,
- uint32_t timestamp, unsigned int timeout);
-
-void adreno_drawctxt_invalidate(struct kgsl_device *device,
- struct kgsl_context *context);
-
/* GPU context switch helper functions */
void build_quad_vtxbuff(struct adreno_context *drawctxt,
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 8fb2830..32dbd51 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -22,7 +22,6 @@
#include "adreno_ringbuffer.h"
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
-#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
@@ -396,8 +395,8 @@
int adreno_dump(struct kgsl_device *device, int manual)
{
- unsigned int cp_ib1_base;
- unsigned int cp_ib2_base;
+ unsigned int cp_ib1_base, cp_ib1_bufsz;
+ unsigned int cp_ib2_base, cp_ib2_bufsz;
phys_addr_t pt_base, cur_pt_base;
unsigned int cp_rb_base, cp_rb_ctrl, rb_count;
unsigned int cp_rb_wptr, cp_rb_rptr;
@@ -410,6 +409,7 @@
unsigned int ts_processed = 0xdeaddead;
struct kgsl_context *context;
unsigned int context_id;
+ unsigned int rbbm_status;
static struct ib_list ib_list;
@@ -419,10 +419,16 @@
mb();
- msm_clk_dump_debug_info();
+ if (device->pm_dump_enable) {
+ msm_clk_dump_debug_info();
- if (adreno_dev->gpudev->postmortem_dump)
- adreno_dev->gpudev->postmortem_dump(adreno_dev);
+ if (adreno_dev->gpudev->postmortem_dump)
+ adreno_dev->gpudev->postmortem_dump(adreno_dev);
+ }
+
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
+ &rbbm_status);
pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
cur_pt_base = pt_base;
@@ -444,8 +450,26 @@
adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
&cp_ib1_base);
kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
+ &cp_ib1_bufsz);
+ kgsl_regread(device,
adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
&cp_ib2_base);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
+ &cp_ib2_bufsz);
+
+ /* If postmortem dump is not enabled, dump minimal set and return */
+ if (!device->pm_dump_enable) {
+
+ KGSL_LOG_DUMP(device,
+ "STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
+ " | RPTR: %04X | WPTR: %04X\n",
+ rbbm_status, cp_ib1_base, cp_ib1_bufsz, cp_ib2_base,
+ cp_ib2_bufsz, cp_rb_rptr, cp_rb_wptr);
+
+ return 0;
+ }
kgsl_sharedmem_readl(&device->memstore,
(unsigned int *) &context_id,
@@ -620,9 +644,5 @@
error_vfree:
vfree(rb_copy);
end:
- /* Restart the dispatcher after a manually triggered dump */
- if (manual)
- adreno_dispatcher_start(adreno_dev);
-
return result;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 1ad90fd..b8cf21f 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -67,8 +67,11 @@
unsigned long wait_time;
unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
unsigned long wait_time_part;
+ unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
unsigned int rptr;
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
+
/* if wptr ahead, fill the remaining with NOPs */
if (wptr_ahead) {
/* -1 for header */
@@ -102,13 +105,43 @@
if (freecmds == 0 || freecmds > numcmds)
break;
+ /* Don't wait for the full timeout, detect a hang faster */
+ if (time_after(jiffies, wait_time_part)) {
+ wait_time_part = jiffies +
+ msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ if ((adreno_ft_detect(rb->device,
+ prev_reg_val))){
+ KGSL_DRV_ERR(rb->device,
+ "Hang detected while waiting for freespace in"
+ "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
+ rptr, rb->wptr);
+ goto err;
+ }
+ }
+
if (time_after(jiffies, wait_time)) {
KGSL_DRV_ERR(rb->device,
"Timed out while waiting for freespace in ringbuffer "
"rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr);
- return -ETIMEDOUT;
+ goto err;
}
+ continue;
+
+err:
+ if (!adreno_dump_and_exec_ft(rb->device)) {
+ if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
+ KGSL_CTXT_WARN(rb->device,
+ "Context %p caused a gpu hang. Will not accept commands for context %d\n",
+ context, context->base.id);
+ return -EDEADLK;
+ }
+ wait_time = jiffies + wait_timeout;
+ } else {
+ /* GPU is hung and fault tolerance failed */
+ BUG();
+ }
}
return 0;
}
@@ -147,8 +180,7 @@
if (!ret) {
ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rb->wptr += numcmds;
- } else
- ptr = ERR_PTR(ret);
+ }
return ptr;
}
@@ -315,6 +347,7 @@
int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
{
int status;
union reg_cp_rb_cntl cp_rb_cntl;
unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
@@ -535,17 +568,18 @@
static int
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
- struct adreno_context *drawctxt,
+ struct adreno_context *context,
unsigned int flags, unsigned int *cmds,
- int sizedwords, uint32_t timestamp)
+ int sizedwords)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
unsigned int *ringcmds;
unsigned int total_sizedwords = sizedwords;
unsigned int i;
unsigned int rcmd_gpu;
- unsigned int context_id;
+ unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
+ unsigned int timestamp;
bool profile_ready;
/*
@@ -560,19 +594,15 @@
adreno_profile_assignments_ready(&adreno_dev->profile) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
- /* The global timestamp always needs to be incremented */
- rb->global_ts++;
-
- /* If this is a internal IB, use the global timestamp for it */
- if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- timestamp = rb->global_ts;
- context_id = KGSL_MEMSTORE_GLOBAL;
- } else {
- context_id = drawctxt->base.id;
- }
-
- if (drawctxt)
- drawctxt->internal_timestamp = rb->global_ts;
+ /*
+ * If the context was not created with per-context timestamp
+ * support, we must use the global timestamp, since issueibcmds
+ * will be returning that one; internal submissions likewise
+ * use the global timestamp.
+ */
+ if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
+ !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
+ context_id = context->base.id;
/* reserve space to temporarily turn off protected mode
* error checking if needed
@@ -583,8 +613,13 @@
/* internal ib command identifier for the ringbuffer */
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
- /* Add two dwords for the CP_INTERRUPT */
- total_sizedwords += drawctxt ? 2 : 0;
+ /* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
+ total_sizedwords += context ? 13 : 0;
+
+ if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
+ (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
+ KGSL_CMD_FLAGS_GET_INT)))
+ total_sizedwords += 2;
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
@@ -592,31 +627,25 @@
if (adreno_is_a2xx(adreno_dev))
total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
+ total_sizedwords += 2; /* scratchpad ts for fault tolerance */
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
- if (adreno_is_a20x(adreno_dev))
- total_sizedwords += 2; /* CACHE_FLUSH */
-
- if (drawctxt) {
+ if (KGSL_MEMSTORE_GLOBAL != context_id)
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
- }
if (adreno_is_a20x(adreno_dev))
total_sizedwords += 2; /* CACHE_FLUSH */
- if (flags & KGSL_CMD_FLAGS_WFI)
- total_sizedwords += 2; /* WFI */
+ if (flags & KGSL_CMD_FLAGS_EOF)
+ total_sizedwords += 2;
if (profile_ready)
total_sizedwords += 6; /* space for pre_ib and post_ib */
- ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords);
-
- if (IS_ERR(ringcmds))
- return PTR_ERR(ringcmds);
- if (ringcmds == NULL)
+ ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
+ if (!ringcmds)
return -ENOSPC;
rcmd_gpu = rb->buffer_desc.gpuaddr
@@ -633,9 +662,24 @@
/* Add any IB required for profiling if it is enabled */
if (profile_ready)
- adreno_profile_preib_processing(rb->device, drawctxt->base.id,
+ adreno_profile_preib_processing(rb->device, context->base.id,
&flags, &ringcmds, &rcmd_gpu);
+ /* always increment the global timestamp. once. */
+ rb->global_ts++;
+
+ if (KGSL_MEMSTORE_GLOBAL != context_id)
+ timestamp = context->timestamp;
+ else
+ timestamp = rb->global_ts;
+
+ /* scratchpad ts for fault tolerance */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type0_packet(adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_TIMESTAMP), 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ rb->global_ts);
+
/* start-of-pipeline timestamp */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
@@ -705,7 +749,7 @@
KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
- if (drawctxt) {
+ if (KGSL_MEMSTORE_GLOBAL != context_id) {
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
@@ -721,13 +765,56 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH);
}
- if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ if (context) {
+ /* Conditional execution based on memory values */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_COND_EXEC, 4));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ts_cmp_enable)) >> 2);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ref_wait_ts)) >> 2);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
+ /* # of conditional command DWORDs */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 8);
+
+ /* Clear the ts_cmp_enable for the context */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ts_cmp_enable));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
+
+ /* Clear the ts_cmp_enable for the global timestamp */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x0);
+
+ /* Trigger the interrupt */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
CP_INT_CNTL__RB_INT_MASK);
}
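Reading the emitted packets back: CP_COND_EXEC makes the CP itself decide whether to fire the interrupt. A sketch of the payload written above (field meanings inferred from this code; the exact CP semantics are assumed):

    /* CP_COND_EXEC, 4 payload dwords:
     *   dword 1: GPU address of ts_cmp_enable >> 2 ("is a wait armed?")
     *   dword 2: GPU address of ref_wait_ts >> 2 (timestamp waited on)
     *   dword 3: this submission's timestamp to compare against
     *   dword 4: 8 - number of following dwords executed only if the
     *            compare passes: the two CP_MEM_WRITEs plus CP_INTERRUPT
     */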
+ /*
+ * If per context timestamps are enabled and any of the kgsl
+ * internal commands want INT to be generated trigger the INT
+ */
+ if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
+ (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
+ KGSL_CMD_FLAGS_GET_INT))) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_INTERRUPT, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ CP_INT_CNTL__RB_INT_MASK);
+ }
+
if (adreno_is_a3xx(adreno_dev)) {
/* Dummy set-constant to trigger context rollover */
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
@@ -737,10 +824,10 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
}
- if (flags & KGSL_CMD_FLAGS_WFI) {
+ if (flags & KGSL_CMD_FLAGS_EOF) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
- cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00000000);
+ KGSL_END_OF_FRAME_IDENTIFIER);
}
adreno_ringbuffer_submit(rb);
@@ -758,10 +845,14 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ if (device->state & KGSL_STATE_HUNG)
+ return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
+ KGSL_TIMESTAMP_RETIRED);
+
flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
- sizedwords, 0);
+ sizedwords);
}
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -954,90 +1045,39 @@
return ret;
}
-/**
- * _ringbuffer_verify_ib() - parse an IB and verify that it is correct
- * @dev_priv: Pointer to the process struct
- * @ibdesc: Pointer to the IB descriptor
- *
- * This function only gets called if debugging is enabled - it walks the IB and
- * does an additional level of parsing and verification above and beyond
- * what the KGSL core does
- */
-static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
- struct kgsl_ibdesc *ibdesc)
-{
- struct kgsl_device *device = dev_priv->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
- /* Check that the size of the IBs is under the allowable limit */
- if (ibdesc->sizedwords == 0 || ibdesc->sizedwords > 0xFFFFF) {
- KGSL_DRV_ERR(device, "Invalid IB size 0x%X\n",
- ibdesc->sizedwords);
- return false;
- }
-
- if (unlikely(adreno_dev->ib_check_level >= 1) &&
- !_parse_ibs(dev_priv, ibdesc->gpuaddr, ibdesc->sizedwords)) {
- KGSL_DRV_ERR(device, "Could not verify the IBs\n");
- return false;
- }
-
- return true;
-}
-
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
+ struct kgsl_ibdesc *ibdesc,
+ unsigned int numibs,
+ uint32_t *timestamp,
+ unsigned int flags)
{
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- int i, ret;
-
- if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
- return -EDEADLK;
-
- /* Verify the IBs before they get queued */
-
- for (i = 0; i < cmdbatch->ibcount; i++) {
- if (!_ringbuffer_verify_ib(dev_priv, &cmdbatch->ibdesc[i]))
- return -EINVAL;
- }
-
- /* Queue the command in the ringbuffer */
- ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
- timestamp);
-
- if (ret)
- KGSL_DRV_ERR(device,
- "adreno_dispatcher_queue_cmd returned %d\n", ret);
-
- return ret;
-}
-
-/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
-int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
-{
- struct kgsl_device *device = &adreno_dev->dev;
- struct kgsl_ibdesc *ibdesc;
- unsigned int numibs;
- unsigned int *link;
+ unsigned int *link = 0;
unsigned int *cmds;
unsigned int i;
- struct kgsl_context *context;
- struct adreno_context *drawctxt;
+ struct adreno_context *drawctxt = NULL;
unsigned int start_index = 0;
- int flags = KGSL_CMD_FLAGS_NONE;
int ret;
- context = cmdbatch->context;
+ if (device->state & KGSL_STATE_HUNG) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
+ context == NULL || ibdesc == 0 || numibs == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
drawctxt = ADRENO_CONTEXT(context);
- ibdesc = cmdbatch->ibdesc;
- numibs = cmdbatch->ibcount;
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
+ ret = -EDEADLK;
+ goto done;
+ }
/* process any profiling results that are available into the log_buf */
adreno_profile_process_results(device);
@@ -1046,21 +1086,17 @@
commands are stored in the first node of the IB chain. We can skip that
if a context switch hasn't occurred */
- if ((drawctxt->flags & CTXT_FLAGS_PREAMBLE) &&
- !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
- (adreno_dev->drawctxt_active == drawctxt))
+ if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
+ adreno_dev->drawctxt_active == drawctxt)
start_index = 1;
- /*
- * In skip mode don't issue the draw IBs but keep all the other
- * accoutrements of a submission (including the interrupt) to keep
- * the accounting sane. Set start_index and numibs to 0 to just
- * generate the start and end markers and skip everything else
- */
-
- if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
- start_index = 0;
- numibs = 0;
+ if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+ if (flags & KGSL_CMD_FLAGS_EOF)
+ drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+ if (start_index)
+ numibs = 1;
+ else
+ numibs = 0;
}
cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
@@ -1081,17 +1117,19 @@
*cmds++ = ibdesc[0].sizedwords;
}
for (i = start_index; i < numibs; i++) {
+ if (unlikely(adreno_dev->ib_check_level >= 1 &&
+ !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
+ ibdesc[i].sizedwords))) {
+ ret = -EINVAL;
+ goto done;
+ }
- /*
- * Skip 0 sized IBs - these are presumed to have been removed
- * from consideration by the FT policy
- */
+ if (ibdesc[i].sizedwords == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
- if (ibdesc[i].sizedwords == 0)
- *cmds++ = cp_nop_packet(2);
- else
- *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
-
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
}
@@ -1099,47 +1137,253 @@
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
- ret = kgsl_setstate(&device->mmu, context->id,
+ kgsl_setstate(&device->mmu, context->id,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
- if (ret)
- goto done;
+ adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
- ret = adreno_drawctxt_switch(adreno_dev, drawctxt, cmdbatch->flags);
-
- /*
- * In the unlikely event of an error in the drawctxt switch,
- * treat it like a hang
- */
- if (ret)
- goto done;
-
- if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
- flags = KGSL_CMD_FLAGS_WFI;
+ if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
+ if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
+ KGSL_DRV_ERR(device,
+ "Invalid user generated ts <%d:0x%x>, "
+ "less than last issued ts <%d:0x%x>\n",
+ context->id, *timestamp, context->id,
+ drawctxt->timestamp);
+ return -ERANGE;
+ }
+ drawctxt->timestamp = *timestamp;
+ } else
+ drawctxt->timestamp++;
ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
drawctxt,
- flags,
- &link[0], (cmds - link),
- cmdbatch->timestamp);
-
-#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+ (flags & KGSL_CMD_FLAGS_EOF),
+ &link[0], (cmds - link));
if (ret)
goto done;
+
+ if (drawctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ *timestamp = drawctxt->timestamp;
+ else
+ *timestamp = adreno_dev->ringbuffer.global_ts;
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
/*
* insert wait for idle after every IB1
* this is conservative but works reliably and is ok
* even for performance simulations
*/
- ret = adreno_idle(device);
+ adreno_idle(device);
#endif
+ /*
+ * If context hung and recovered then return error so that the
+ * application may handle it
+ */
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
+ drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
+ ret = -EPROTO;
+ } else
+ ret = 0;
+
done:
- kgsl_trace_issueibcmds(device, context->id, cmdbatch,
- cmdbatch->timestamp, cmdbatch->flags, ret,
- drawctxt->type);
+ device->pwrctrl.irq_last = 0;
+ kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
+ numibs, *timestamp, flags, ret,
+ drawctxt ? drawctxt->type : 0);
kfree(link);
return ret;
}
+
+static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr)
+{
+ unsigned int temp_rb_rptr = rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool cmd_start = false;
+
+ /* Scan to the start of the IB sequence and turn the preamble on */
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+ if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
+ /* switch to the previously read dword */
+ i = (i + 1) % 2;
+ if (val[i] == cp_nop_packet(4)) {
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_writel(rb->device,
+ &rb->buffer_desc,
+ temp_rb_rptr, cp_nop_packet(1));
+ }
+ KGSL_FT_INFO(rb->device,
+ "Turned preamble on at offset 0x%x\n",
+ temp_rb_rptr / 4);
+ break;
+ }
+ /* If we reach the beginning of the next command sequence then
+ * exit. The first command encountered is the current one, so
+ * don't break on that. */
+ if (KGSL_CMD_IDENTIFIER == val[i]) {
+ if (cmd_start)
+ break;
+ cmd_start = true;
+ }
+
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+}
+
+void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_ft_data *ft_data)
+{
+ struct kgsl_device *device = rb->device;
+ unsigned int rb_rptr = ft_data->start_of_replay_cmds;
+ unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
+ unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
+ unsigned int cmd_start_idx = 0;
+ unsigned int val1 = 0;
+ int copy_rb_contents = 0;
+ unsigned int temp_rb_rptr;
+ struct kgsl_context *k_ctxt;
+ struct adreno_context *a_ctxt;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int *temp_rb_buffer = ft_data->rb_buffer;
+ int *rb_size = &ft_data->rb_size;
+ unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
+ int *bad_rb_size = &ft_data->bad_rb_size;
+ unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
+ int *good_rb_size = &ft_data->good_rb_size;
+
+ /*
+ * If the start index from where commands need to be copied is invalid
+ * then no need to save off any commands
+ */
+ if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
+ return;
+
+ k_ctxt = kgsl_context_get(device, ft_data->context_id);
+
+ if (k_ctxt) {
+ a_ctxt = ADRENO_CONTEXT(k_ctxt);
+ if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
+ _turn_preamble_on_for_ib_seq(rb, rb_rptr);
+ kgsl_context_put(k_ctxt);
+ }
+ k_ctxt = NULL;
+
+ /* Walk the rb from the context switch. Omit any commands
+ * for an invalid context. */
+ while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ /* Start is the NOP dword that comes before
+ * KGSL_CMD_IDENTIFIER */
+ cmd_start_idx = temp_rb_idx - 1;
+ if ((copy_rb_contents) && (good_rb_idx))
+ last_good_cmd_end_idx = good_rb_idx - 1;
+ if ((!copy_rb_contents) && (bad_rb_idx))
+ last_bad_cmd_end_idx = bad_rb_idx - 1;
+ }
+
+ /* check for context switch indicator */
+ if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ unsigned int temp_idx, val2;
+ /* increment by 3 to get to the context_id */
+			temp_rb_rptr = (rb_rptr + 3 * sizeof(unsigned int)) %
+						size;
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+ temp_rb_rptr);
+
+		/* If the GPU switches to a context that did not cause the
+		 * hang then start saving the rb contents as those commands
+		 * can be executed */
+ k_ctxt = kgsl_context_get(rb->device, val2);
+
+ if (k_ctxt) {
+ a_ctxt = ADRENO_CONTEXT(k_ctxt);
+
+			/* If we are changing to a good context and were not
+			 * copying commands then copy over commands to the
+			 * good context */
+			if (!copy_rb_contents &&
+				!(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
+ for (temp_idx = cmd_start_idx;
+ temp_idx < temp_rb_idx;
+ temp_idx++)
+ good_rb_buffer[good_rb_idx++] =
+ temp_rb_buffer[temp_idx];
+ ft_data->last_valid_ctx_id = val2;
+ copy_rb_contents = 1;
+ /* remove the good commands from bad buffer */
+ bad_rb_idx = last_bad_cmd_end_idx;
+ } else if (copy_rb_contents && k_ctxt &&
+ (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
+
+				/* If we are switching from a good context
+				 * back to a bad one then copy the commands
+				 * gathered for this sequence over to the
+				 * bad context */
+ for (temp_idx = cmd_start_idx;
+ temp_idx < temp_rb_idx;
+ temp_idx++)
+ bad_rb_buffer[bad_rb_idx++] =
+ temp_rb_buffer[temp_idx];
+				/* Since we are changing to a bad context,
+				 * remove the dwords we copied for this
+				 * sequence from the good buffer */
+ good_rb_idx = last_good_cmd_end_idx;
+ copy_rb_contents = 0;
+ }
+ }
+ kgsl_context_put(k_ctxt);
+ }
+
+ if (copy_rb_contents)
+ good_rb_buffer[good_rb_idx++] = val1;
+ else
+ bad_rb_buffer[bad_rb_idx++] = val1;
+
+ /* Copy both good and bad commands to temp buffer */
+ temp_rb_buffer[temp_rb_idx++] = val1;
+
+ rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
+ }
+ *good_rb_size = good_rb_idx;
+ *bad_rb_size = bad_rb_idx;
+ *rb_size = temp_rb_idx;
+}
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+ int num_rb_contents)
+{
+ int i;
+ unsigned int *ringcmds;
+ unsigned int rcmd_gpu;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+
+ if (!num_rb_contents)
+ return;
+
+ if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR, 0);
+ BUG_ON(num_rb_contents > rb->buffer_desc.size);
+ }
+ ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
+ for (i = 0; i < num_rb_contents; i++)
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb_buff[i]);
+ rb->wptr += num_rb_contents;
+ adreno_ringbuffer_submit(rb);
+}
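
Taken together, the two functions above are assumed to pair up during fault tolerance roughly as follows. This is an illustrative sketch of the call order, not the driver's actual recovery path, using the adreno_ft_data fields referenced above:

static void ft_replay_good_cmds(struct adreno_ringbuffer *rb,
				struct adreno_ft_data *ft_data)
{
	/* split the ringbuffer into good/bad/full command snapshots */
	adreno_ringbuffer_extract(rb, ft_data);

	/* resubmit only the commands from contexts that did not hang */
	adreno_ringbuffer_restore(rb, ft_data->good_rb_buffer,
					ft_data->good_rb_size);
}
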
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index 3aa0101..9634e32 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -27,6 +27,7 @@
struct kgsl_device;
struct kgsl_device_private;
+struct adreno_ft_data;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
@@ -98,11 +99,10 @@
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp);
-
-int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_ibdesc *ibdesc,
+ unsigned int numibs,
+ uint32_t *timestamp,
+ unsigned int flags);
int adreno_ringbuffer_init(struct kgsl_device *device);
@@ -124,6 +124,13 @@
void kgsl_cp_intrcallback(struct kgsl_device *device);
+void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_ft_data *ft_data);
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+ int num_rb_contents);
+
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
struct adreno_context *context,
unsigned int numcmds);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
deleted file mode 100644
index 6079b61..0000000
--- a/drivers/gpu/msm/adreno_trace.h
+++ /dev/null
@@ -1,270 +0,0 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#if !defined(_ADRENO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _ADRENO_TRACE_H
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM kgsl
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE adreno_trace
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(adreno_cmdbatch_queued,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
- TP_ARGS(cmdbatch, queued),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, timestamp)
- __field(unsigned int, queued)
- __field(unsigned int, flags)
- ),
- TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
- __entry->queued = queued;
- __entry->flags = cmdbatch->flags;
- ),
- TP_printk(
- "ctx=%u ts=%u queued=%u flags=%s",
- __entry->id, __entry->timestamp, __entry->queued,
- __entry->flags ? __print_flags(__entry->flags, "|",
- { KGSL_CONTEXT_SYNC, "SYNC" },
- { KGSL_CONTEXT_END_OF_FRAME, "EOF" })
- : "none"
- )
-);
-
-DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
- TP_ARGS(cmdbatch, inflight),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, timestamp)
- __field(unsigned int, inflight)
- ),
- TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
- __entry->inflight = inflight;
- ),
- TP_printk(
- "ctx=%u ts=%u inflight=%u",
- __entry->id, __entry->timestamp,
- __entry->inflight
- )
-);
-
-DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
- TP_ARGS(cmdbatch, inflight)
-);
-
-TRACE_EVENT(adreno_cmdbatch_retired,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
- TP_ARGS(cmdbatch, inflight),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, timestamp)
- __field(unsigned int, inflight)
- __field(unsigned int, recovery)
- ),
- TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
- __entry->inflight = inflight;
- __entry->recovery = cmdbatch->fault_recovery;
- ),
- TP_printk(
- "ctx=%u ts=%u inflight=%u recovery=%s",
- __entry->id, __entry->timestamp,
- __entry->inflight,
- __entry->recovery ?
- __print_flags(__entry->recovery, "|",
- ADRENO_FT_TYPES) : "none"
- )
-);
-
-TRACE_EVENT(adreno_cmdbatch_fault,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
- TP_ARGS(cmdbatch, fault),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, timestamp)
- __field(unsigned int, fault)
- ),
- TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
- __entry->fault = fault;
- ),
- TP_printk(
- "ctx=%u ts=%u type=%s",
- __entry->id, __entry->timestamp,
- __print_symbolic(__entry->fault,
- { 0, "none" },
- { ADRENO_SOFT_FAULT, "soft" },
- { ADRENO_HARD_FAULT, "hard" },
- { ADRENO_TIMEOUT_FAULT, "timeout" })
- )
-);
-
-TRACE_EVENT(adreno_cmdbatch_recovery,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
- TP_ARGS(cmdbatch, action),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, timestamp)
- __field(unsigned int, action)
- ),
- TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
- __entry->action = action;
- ),
- TP_printk(
- "ctx=%u ts=%u action=%s",
- __entry->id, __entry->timestamp,
- __print_symbolic(__entry->action, ADRENO_FT_TYPES)
- )
-);
-
-DECLARE_EVENT_CLASS(adreno_drawctxt_template,
- TP_PROTO(struct adreno_context *drawctxt),
- TP_ARGS(drawctxt),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- ),
- TP_fast_assign(
- __entry->id = drawctxt->base.id;
- ),
- TP_printk("ctx=%u", __entry->id)
-);
-
-DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_sleep,
- TP_PROTO(struct adreno_context *drawctxt),
- TP_ARGS(drawctxt)
-);
-
-DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_wake,
- TP_PROTO(struct adreno_context *drawctxt),
- TP_ARGS(drawctxt)
-);
-
-DEFINE_EVENT(adreno_drawctxt_template, dispatch_queue_context,
- TP_PROTO(struct adreno_context *drawctxt),
- TP_ARGS(drawctxt)
-);
-
-DEFINE_EVENT(adreno_drawctxt_template, adreno_drawctxt_invalidate,
- TP_PROTO(struct adreno_context *drawctxt),
- TP_ARGS(drawctxt)
-);
-
-TRACE_EVENT(adreno_drawctxt_wait_start,
- TP_PROTO(unsigned int id, unsigned int ts),
- TP_ARGS(id, ts),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, ts)
- ),
- TP_fast_assign(
- __entry->id = id;
- __entry->ts = ts;
- ),
- TP_printk(
- "ctx=%u ts=%u",
- __entry->id, __entry->ts
- )
-);
-
-TRACE_EVENT(adreno_drawctxt_wait_done,
- TP_PROTO(unsigned int id, unsigned int ts, int status),
- TP_ARGS(id, ts, status),
- TP_STRUCT__entry(
- __field(unsigned int, id)
- __field(unsigned int, ts)
- __field(int, status)
- ),
- TP_fast_assign(
- __entry->id = id;
- __entry->ts = ts;
- __entry->status = status;
- ),
- TP_printk(
- "ctx=%u ts=%u status=%d",
- __entry->id, __entry->ts, __entry->status
- )
-);
-
-TRACE_EVENT(adreno_drawctxt_switch,
- TP_PROTO(struct adreno_context *oldctx,
- struct adreno_context *newctx,
- unsigned int flags),
- TP_ARGS(oldctx, newctx, flags),
- TP_STRUCT__entry(
- __field(unsigned int, oldctx)
- __field(unsigned int, newctx)
- __field(unsigned int, flags)
- ),
- TP_fast_assign(
- __entry->oldctx = oldctx ? oldctx->base.id : 0;
- __entry->newctx = newctx ? newctx->base.id : 0;
- ),
- TP_printk(
- "oldctx=%u newctx=%u flags=%X",
- __entry->oldctx, __entry->newctx, flags
- )
-);
-
-TRACE_EVENT(adreno_gpu_fault,
- TP_PROTO(unsigned int ctx, unsigned int ts,
- unsigned int status, unsigned int rptr, unsigned int wptr,
- unsigned int ib1base, unsigned int ib1size,
- unsigned int ib2base, unsigned int ib2size),
- TP_ARGS(ctx, ts, status, rptr, wptr, ib1base, ib1size, ib2base,
- ib2size),
- TP_STRUCT__entry(
- __field(unsigned int, ctx)
- __field(unsigned int, ts)
- __field(unsigned int, status)
- __field(unsigned int, rptr)
- __field(unsigned int, wptr)
- __field(unsigned int, ib1base)
- __field(unsigned int, ib1size)
- __field(unsigned int, ib2base)
- __field(unsigned int, ib2size)
- ),
- TP_fast_assign(
- __entry->ctx = ctx;
- __entry->ts = ts;
- __entry->status = status;
- __entry->rptr = rptr;
- __entry->wptr = wptr;
- __entry->ib1base = ib1base;
- __entry->ib1size = ib1size;
- __entry->ib2base = ib2base;
- __entry->ib2size = ib2size;
- ),
- TP_printk("ctx=%d ts=%d status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
- __entry->ctx, __entry->ts, __entry->status, __entry->wptr,
- __entry->rptr, __entry->ib1base, __entry->ib1size,
- __entry->ib2base, __entry->ib2size)
-);
-
-#endif /* _ADRENO_TRACE_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 35a03de..2781a34 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -14,7 +14,6 @@
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
@@ -63,10 +62,59 @@
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
/**
+ * kgsl_hang_check() - Check for a GPU hang
+ * @work: Hang check work structure embedded in the KGSL device
+ *
+ * This function runs every KGSL_TIMEOUT_PART milliseconds while the
+ * GPU is active to check for a hang. If a hang is detected we
+ * trigger fault tolerance.
+ */
+void kgsl_hang_check(struct work_struct *work)
+{
+ struct kgsl_device *device = container_of(work, struct kgsl_device,
+ hang_check_ws);
+ static unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
+
+ mutex_lock(&device->mutex);
+
+ if (device->state == KGSL_STATE_ACTIVE) {
+
+ /* Check to see if the GPU is hung */
+ if (adreno_ft_detect(device, prev_reg_val))
+ adreno_dump_and_exec_ft(device);
+
+ mod_timer(&device->hang_timer,
+ (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
+ }
+
+ mutex_unlock(&device->mutex);
+}
+
+/**
+ * hang_timer() - Hang timer function
+ * @data: KGSL device structure, cast to unsigned long
+ *
+ * This function is called when the hang timer expires. If the GPU
+ * is in the active state we queue work on the device workqueue to
+ * check for a hang; the work handler restarts the timer after
+ * KGSL_TIMEOUT_PART milliseconds.
+ */
+void hang_timer(unsigned long data)
+{
+ struct kgsl_device *device = (struct kgsl_device *) data;
+
+ if (device->state == KGSL_STATE_ACTIVE) {
+ /* Have work run in a non-interrupt context. */
+ queue_work(device->work_queue, &device->hang_check_ws);
+ }
+}
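
The timer/workqueue split above is the standard pattern for periodic checks that need a sleepable context: the timer callback runs in interrupt context and only queues work, while the work handler may take mutexes. A self-contained sketch of the same pattern with illustrative names (not KGSL code):

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define CHECK_PERIOD_MS 50

static struct timer_list demo_timer;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* sleepable context: safe to take mutexes and poke hardware */
	pr_info("periodic check ran\n");
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(CHECK_PERIOD_MS));
}

static void demo_timer_fn(unsigned long data)
{
	/* atomic context: defer the real work to the workqueue */
	schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	setup_timer(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(CHECK_PERIOD_MS));
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
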
+
+/**
* kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
* device: KGSL device
* id: ID of the context submitting the command
- * cmdbatch: Pointer to kgsl_cmdbatch describing these commands
+ * ibdesc: Pointer to the list of IB descriptors
+ * numibs: Number of IBs in the list
* timestamp: Timestamp assigned to the command batch
* flags: Flags sent by the user
* result: Result of the submission attempt
@@ -76,11 +124,11 @@
* GPU specific modules.
*/
void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_ibdesc *ibdesc, int numibs,
unsigned int timestamp, unsigned int flags,
int result, unsigned int type)
{
- trace_kgsl_issueibcmds(device, id, cmdbatch,
+ trace_kgsl_issueibcmds(device, id, ibdesc, numibs,
timestamp, flags, result, type);
}
EXPORT_SYMBOL(kgsl_trace_issueibcmds);
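
The wrapper above exists because trace_kgsl_issueibcmds() is only visible where the KGSL trace header is compiled in; exporting a thin proxy lets GPU-specific modules fire the core tracepoint. The idiom, sketched with hypothetical names:

#include <linux/export.h>
#include "demo_trace.h"	/* hypothetical header defining trace_demo_event() */

/* Thin exported proxy so other modules can fire a tracepoint that is
 * defined against this compilation unit's trace header. */
void demo_trace_event(int id, unsigned int timestamp)
{
	trace_demo_event(id, timestamp);
}
EXPORT_SYMBOL(demo_trace_event);
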
@@ -482,8 +530,8 @@
EXPORT_SYMBOL(kgsl_context_init);
/**
- * kgsl_context_detach() - Release the "master" context reference
- * @context: The context that will be detached
+ * kgsl_context_detach() - Release the "master" context reference
+ * @context: The context that will be detached
*
* This is called when a context becomes unusable, because userspace
* has requested for it to be destroyed. The context itself may
@@ -492,12 +540,14 @@
* detached by checking the KGSL_CONTEXT_DETACHED bit in
* context->priv.
*/
-int kgsl_context_detach(struct kgsl_context *context)
+void
+kgsl_context_detach(struct kgsl_context *context)
{
- int ret;
-
+ struct kgsl_device *device;
if (context == NULL)
- return -EINVAL;
+ return;
+
+ device = context->device;
/*
* Mark the context as detached to keep others from using
@@ -505,22 +555,19 @@
* we don't try to detach twice.
*/
if (test_and_set_bit(KGSL_CONTEXT_DETACHED, &context->priv))
- return -EINVAL;
+ return;
- trace_kgsl_context_detach(context->device, context);
+ trace_kgsl_context_detach(device, context);
- ret = context->device->ftbl->drawctxt_detach(context);
-
+ device->ftbl->drawctxt_detach(context);
/*
* Cancel events after the device-specific context is
* detached, to avoid possibly freeing memory while
* it is still in use by the GPU.
*/
- kgsl_context_cancel_events(context->device, context);
+ kgsl_context_cancel_events(device, context);
kgsl_context_put(context);
-
- return ret;
}
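
The detach path above relies on test_and_set_bit() for exactly-once semantics: the first caller atomically claims KGSL_CONTEXT_DETACHED and proceeds with teardown, and any concurrent or repeated caller returns early. A minimal illustration of the idiom (names are made up):

#include <linux/bitops.h>

#define OBJ_DEAD 0

struct demo_obj {
	unsigned long priv;
};

static void demo_obj_teardown(struct demo_obj *obj)
{
	/* atomically claim the bit; only the first caller wins */
	if (test_and_set_bit(OBJ_DEAD, &obj->priv))
		return;

	/* exactly-once cleanup goes here */
}
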
void
@@ -532,8 +579,6 @@
trace_kgsl_context_destroy(device, context);
- BUG_ON(!kgsl_context_detached(context));
-
write_lock(&device->context_lock);
if (context->id != KGSL_CONTEXT_INVALID) {
idr_remove(&device->context_idr, context->id);
@@ -604,11 +649,10 @@
policy_saved = device->pwrscale.policy;
device->pwrscale.policy = NULL;
kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
-
- /* Tell the device to drain the submission queue */
- device->ftbl->drain(device);
-
- /* Wait for the active count to hit zero */
+ /*
+ * Make sure no user process is waiting for a timestamp
+	 * before suspending.
+ */
kgsl_active_count_wait(device, 0);
/*
@@ -619,10 +663,13 @@
/* Don't let the timer wake us during suspended sleep. */
del_timer_sync(&device->idle_timer);
+ del_timer_sync(&device->hang_timer);
switch (device->state) {
case KGSL_STATE_INIT:
break;
case KGSL_STATE_ACTIVE:
+ /* Wait for the device to become idle */
+		device->ftbl->idle(device);
+		/* fall through */
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
/* make sure power is on to stop the device */
@@ -948,16 +995,8 @@
if (context == NULL)
break;
- if (context->dev_priv == dev_priv) {
- /*
- * Hold a reference to the context in case somebody
- * tries to put it while we are detaching
- */
-
- _kgsl_context_get(context);
+ if (context->dev_priv == dev_priv)
kgsl_context_detach(context);
- kgsl_context_put(context);
- }
next = next + 1;
}
@@ -971,7 +1010,6 @@
result = kgsl_close_device(device);
mutex_unlock(&device->mutex);
-
kfree(dev_priv);
kgsl_put_process_private(device, private);
@@ -1004,6 +1042,7 @@
* Make sure the gates are open, so they don't block until
* we start suspend or FT.
*/
+ complete_all(&device->ft_gate);
complete_all(&device->hwaccess_gate);
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_active_count_put(device);
@@ -1399,610 +1438,93 @@
return result;
}
-/*
- * KGSL command batch management
- * A command batch is a single submission from userland. The cmdbatch
- * encapsulates everything about the submission : command buffers, flags and
- * sync points.
- *
- * Sync points are events that need to expire before the
- * cmdbatch can be queued to the hardware. For each sync point a
- * kgsl_cmdbatch_sync_event struct is created and added to a list in the
- * cmdbatch. There can be multiple types of events both internal ones (GPU
- * events) and external triggers. As the events expire the struct is deleted
- * from the list. The GPU will submit the command batch as soon as the list
- * goes empty indicating that all the sync points have been met.
- */
-
-/**
- * struct kgsl_cmdbatch_sync_event
- * @type: Syncpoint type
- * @node: Local list node for the cmdbatch sync point list
- * @cmdbatch: Pointer to the cmdbatch that owns the sync event
- * @context: Pointer to the KGSL context that owns the cmdbatch
- * @timestamp: Pending timestamp for the event
- * @handle: Pointer to a sync fence handle
- * @device: Pointer to the KGSL device
- * @lock: Spin lock to protect the sync event list
- */
-struct kgsl_cmdbatch_sync_event {
- int type;
- struct list_head node;
- struct kgsl_cmdbatch *cmdbatch;
- struct kgsl_context *context;
- unsigned int timestamp;
- struct kgsl_sync_fence_waiter *handle;
- struct kgsl_device *device;
- spinlock_t lock;
-};
-
-/**
- * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
- * @kref: Pointer to the kref structure for this object
- *
- * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
- */
-void kgsl_cmdbatch_destroy_object(struct kref *kref)
-{
- struct kgsl_cmdbatch *cmdbatch = container_of(kref,
- struct kgsl_cmdbatch, refcount);
-
- kgsl_context_put(cmdbatch->context);
- kfree(cmdbatch->ibdesc);
-
- kfree(cmdbatch);
-}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
-
-/*
- * a generic function to retire a pending sync event and (possibly)
- * kick the dispatcher
- */
-static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
- struct kgsl_cmdbatch_sync_event *event)
-{
- int sched = 0;
-
- spin_lock(&event->cmdbatch->lock);
- list_del(&event->node);
- sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0;
- spin_unlock(&event->cmdbatch->lock);
-
- /*
- * if this is the last event in the list then tell
- * the GPU device that the cmdbatch can be submitted
- */
-
- if (sched && device->ftbl->drawctxt_sched)
- device->ftbl->drawctxt_sched(device, event->cmdbatch->context);
-}
-
-
-/*
- * This function is called by the GPU event when the sync event timestamp
- * expires
- */
-static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, void *priv,
- u32 id, u32 timestamp, u32 type)
-{
- struct kgsl_cmdbatch_sync_event *event = priv;
-
- kgsl_cmdbatch_sync_expire(device, event);
-
- kgsl_context_put(event->context);
- kgsl_cmdbatch_put(event->cmdbatch);
-
- kfree(event);
-}
-
-/**
- * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
- * @cmdbatch: Pointer to the command batch object to destroy
- *
- * Start the process of destroying a command batch. Cancel any pending events
- * and decrement the refcount.
- */
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
-{
- struct kgsl_cmdbatch_sync_event *event, *tmp;
-
- spin_lock(&cmdbatch->lock);
-
- /* Delete any pending sync points for this command batch */
- list_for_each_entry_safe(event, tmp, &cmdbatch->synclist, node) {
-
- if (event->type == KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP) {
- /* Cancel the event if it still exists */
- kgsl_cancel_event(cmdbatch->device, event->context,
- event->timestamp, kgsl_cmdbatch_sync_func,
- event);
- } else if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE) {
- if (kgsl_sync_fence_async_cancel(event->handle)) {
- list_del(&event->node);
- kfree(event);
- kgsl_cmdbatch_put(cmdbatch);
- }
- }
- }
-
- spin_unlock(&cmdbatch->lock);
- kgsl_cmdbatch_put(cmdbatch);
-}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
-
-/*
- * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
- * when a fence is expired
- */
-static void kgsl_cmdbatch_sync_fence_func(void *priv)
-{
- struct kgsl_cmdbatch_sync_event *event = priv;
-
- spin_lock(&event->lock);
- kgsl_cmdbatch_sync_expire(event->device, event);
- kgsl_cmdbatch_put(event->cmdbatch);
- spin_unlock(&event->lock);
- kfree(event);
-}
-
-/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
- * @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
- *
- * Add a new fence sync syncpoint to the cmdbatch.
- */
-static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
-{
- struct kgsl_cmd_syncpoint_fence *sync = priv;
- struct kgsl_cmdbatch_sync_event *event;
-
- event = kzalloc(sizeof(*event), GFP_KERNEL);
-
- if (event == NULL)
- return -ENOMEM;
-
- kref_get(&cmdbatch->refcount);
-
- event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
- event->cmdbatch = cmdbatch;
- event->device = device;
- spin_lock_init(&event->lock);
-
- /*
- * Add it to the list first to account for the possiblity that the
- * callback will happen immediately after the call to
- * kgsl_sync_fence_async_wait
- */
-
- spin_lock(&cmdbatch->lock);
- list_add(&event->node, &cmdbatch->synclist);
- spin_unlock(&cmdbatch->lock);
-
- /*
- * There is a distinct race condition that can occur if the fence
- * callback is fired before the function has a chance to return. The
- * event struct would be freed before we could write event->handle and
- * hilarity ensued. Protect against this by protecting the call to
- * kgsl_sync_fence_async_wait and the kfree in the callback with a lock.
- */
-
- spin_lock(&event->lock);
-
- event->handle = kgsl_sync_fence_async_wait(sync->fd,
- kgsl_cmdbatch_sync_fence_func, event);
-
-
- if (IS_ERR_OR_NULL(event->handle)) {
- int ret = PTR_ERR(event->handle);
-
- spin_lock(&cmdbatch->lock);
- list_del(&event->node);
- spin_unlock(&cmdbatch->lock);
-
- kgsl_cmdbatch_put(cmdbatch);
- spin_unlock(&event->lock);
- kfree(event);
-
- return ret;
- }
-
- spin_unlock(&event->lock);
- return 0;
-}
-
-/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
- * @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
- *
- * Add a new sync point timestamp event to the cmdbatch.
- */
-static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
-{
- struct kgsl_cmd_syncpoint_timestamp *sync = priv;
- struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
- sync->context_id);
- struct kgsl_cmdbatch_sync_event *event;
- int ret = -EINVAL;
-
- if (context == NULL)
- return -EINVAL;
-
- /* Sanity check - you can't create a sync point on your own context */
- if (context == cmdbatch->context) {
- KGSL_DRV_ERR(device,
- "Cannot create a sync point on your own context %d\n",
- context->id);
- goto done;
- }
-
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (event == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- kref_get(&cmdbatch->refcount);
-
- event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
- event->cmdbatch = cmdbatch;
- event->context = context;
- event->timestamp = sync->timestamp;
-
- spin_lock(&cmdbatch->lock);
- list_add(&event->node, &cmdbatch->synclist);
- spin_unlock(&cmdbatch->lock);
-
- mutex_lock(&device->mutex);
- kgsl_active_count_get(device);
- ret = kgsl_add_event(device, context->id, sync->timestamp,
- kgsl_cmdbatch_sync_func, event, NULL);
- kgsl_active_count_put(device);
- mutex_unlock(&device->mutex);
-
- if (ret) {
- spin_lock(&cmdbatch->lock);
- list_del(&event->node);
- spin_unlock(&cmdbatch->lock);
-
- kgsl_cmdbatch_put(cmdbatch);
- kfree(event);
- }
-
-done:
- if (ret)
- kgsl_context_put(context);
-
- return ret;
-}
-
-/**
- * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
- * @device: Pointer to the KGSL device struct for the GPU
- * @cmdbatch: Pointer to the cmdbatch
- * @sync: Pointer to the user-specified struct defining the syncpoint
- *
- * Create a new sync point in the cmdbatch based on the user specified
- * parameters
- */
-static int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmd_syncpoint *sync)
-{
- void *priv;
- int ret, psize;
- int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
- void *priv);
-
- switch (sync->type) {
- case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
- psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
- func = kgsl_cmdbatch_add_sync_timestamp;
- break;
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
- psize = sizeof(struct kgsl_cmd_syncpoint_fence);
- func = kgsl_cmdbatch_add_sync_fence;
- break;
- default:
- KGSL_DRV_ERR(device, "Invalid sync type 0x%x\n", sync->type);
- return -EINVAL;
- }
-
- if (sync->size != psize) {
- KGSL_DRV_ERR(device, "Invalid sync size %d\n", sync->size);
- return -EINVAL;
- }
-
- priv = kzalloc(sync->size, GFP_KERNEL);
- if (priv == NULL)
- return -ENOMEM;
-
- if (copy_from_user(priv, sync->priv, sync->size)) {
- kfree(priv);
- return -EFAULT;
- }
-
- ret = func(device, cmdbatch, priv);
- kfree(priv);
-
- return ret;
-}
-
-/**
- * kgsl_cmdbatch_create() - Create a new cmdbatch structure
- * @device: Pointer to a KGSL device struct
- * @context: Pointer to a KGSL context struct
- * @numibs: Number of indirect buffers to make room for in the cmdbatch
- *
- * Allocate an new cmdbatch structure and add enough room to store the list of
- * indirect buffers
- */
-static struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags,
- unsigned int numibs)
-{
- struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
- if (cmdbatch == NULL)
- return ERR_PTR(-ENOMEM);
-
- if (!(flags & KGSL_CONTEXT_SYNC)) {
- cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
- GFP_KERNEL);
- if (cmdbatch->ibdesc == NULL) {
- kfree(cmdbatch);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- kref_init(&cmdbatch->refcount);
- INIT_LIST_HEAD(&cmdbatch->synclist);
- spin_lock_init(&cmdbatch->lock);
-
- cmdbatch->device = device;
- cmdbatch->ibcount = (flags & KGSL_CONTEXT_SYNC) ? 0 : numibs;
- cmdbatch->context = context;
- cmdbatch->flags = flags & ~KGSL_CONTEXT_SUBMIT_IB_LIST;
-
- /*
- * Increase the reference count on the context so it doesn't disappear
- * during the lifetime of this command batch
- */
- _kgsl_context_get(context);
-
- return cmdbatch;
-}
-
-/**
- * _kgsl_cmdbatch_verify() - Perform a quick sanity check on a command batch
- * @device: Pointer to a KGSL instance that owns the command batch
- * @pagetable: Pointer to the pagetable for the current process
- * @cmdbatch: Number of indirect buffers to make room for in the cmdbatch
- *
- * Do a quick sanity test on the list of indirect buffers in a command batch
- * verifying that the size and GPU address
- */
-static bool _kgsl_cmdbatch_verify(struct kgsl_device_private *dev_priv,
- struct kgsl_cmdbatch *cmdbatch)
-{
- int i;
- struct kgsl_process_private *private = dev_priv->process_priv;
-
- for (i = 0; i < cmdbatch->ibcount; i++) {
- if (cmdbatch->ibdesc[i].sizedwords == 0) {
- KGSL_DRV_ERR(dev_priv->device,
- "invalid size ctx %d ib(%d) %X/%X\n",
- cmdbatch->context->id, i,
- cmdbatch->ibdesc[i].gpuaddr,
- cmdbatch->ibdesc[i].sizedwords);
-
- return false;
- }
-
- if (!kgsl_mmu_gpuaddr_in_range(private->pagetable,
- cmdbatch->ibdesc[i].gpuaddr)) {
- KGSL_DRV_ERR(dev_priv->device,
- "Invalid address ctx %d ib(%d) %X/%X\n",
- cmdbatch->context->id, i,
- cmdbatch->ibdesc[i].gpuaddr,
- cmdbatch->ibdesc[i].sizedwords);
-
- return false;
- }
- }
-
- return true;
-}
-
-/**
- * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct
- * @device: Pointer to the KGSL device struct for the GPU
- * @context: Pointer to the KGSL context that issued the command batch
- * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
- *
- * Create a command batch from the legacy issueibcmds format.
- */
-static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy(
- struct kgsl_device *device,
- struct kgsl_context *context,
- struct kgsl_ringbuffer_issueibcmds *param)
-{
- struct kgsl_cmdbatch *cmdbatch =
- kgsl_cmdbatch_create(device, context, param->flags, 1);
-
- if (IS_ERR(cmdbatch))
- return cmdbatch;
-
- cmdbatch->ibdesc[0].gpuaddr = param->ibdesc_addr;
- cmdbatch->ibdesc[0].sizedwords = param->numibs;
- cmdbatch->ibcount = 1;
- cmdbatch->flags = param->flags;
-
- return cmdbatch;
-}
-
-/**
- * _kgsl_cmdbatch_create() - Create a cmdbatch from a ioctl struct
- * @device: Pointer to the KGSL device struct for the GPU
- * @context: Pointer to the KGSL context that issued the command batch
- * @flags: Flags passed in from the user command
- * @cmdlist: Pointer to the list of commands from the user
- * @numcmds: Number of commands in the list
- * @synclist: Pointer to the list of syncpoints from the user
- * @numsyncs: Number of syncpoints in the list
- *
- * Create a command batch from the standard issueibcmds format sent by the user.
- */
-static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context,
- unsigned int flags,
- unsigned int cmdlist, unsigned int numcmds,
- unsigned int synclist, unsigned int numsyncs)
-{
- struct kgsl_cmdbatch *cmdbatch =
- kgsl_cmdbatch_create(device, context, flags, numcmds);
- int ret = 0;
-
- if (IS_ERR(cmdbatch))
- return cmdbatch;
-
- if (!(flags & KGSL_CONTEXT_SYNC)) {
- if (copy_from_user(cmdbatch->ibdesc, (void __user *) cmdlist,
- sizeof(struct kgsl_ibdesc) * numcmds)) {
- ret = -EFAULT;
- goto done;
- }
- }
-
- if (synclist && numsyncs) {
- struct kgsl_cmd_syncpoint sync;
- void __user *uptr = (void __user *) synclist;
- int i;
-
- for (i = 0; i < numsyncs; i++) {
- memset(&sync, 0, sizeof(sync));
-
- if (copy_from_user(&sync, uptr, sizeof(sync))) {
- ret = -EFAULT;
- break;
- }
-
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
-
- if (ret)
- break;
-
- uptr += sizeof(sync);
- }
- }
-
-done:
- if (ret) {
- kgsl_cmdbatch_destroy(cmdbatch);
- return ERR_PTR(ret);
- }
-
- return cmdbatch;
-}
-
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
+ int result = 0;
+ int i = 0;
struct kgsl_ringbuffer_issueibcmds *param = data;
- struct kgsl_device *device = dev_priv->device;
+ struct kgsl_ibdesc *ibdesc;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch;
- long result = -EINVAL;
- /* The legacy functions don't support synchronization commands */
- if (param->flags & KGSL_CONTEXT_SYNC)
- return -EINVAL;
-
- /* Get the context */
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
- if (context == NULL)
+ if (context == NULL) {
+ result = -EINVAL;
goto done;
+ }
if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
+ if (!param->numibs) {
+ result = -EINVAL;
+ goto done;
+ }
+
/*
- * Do a quick sanity check on the number of IBs in the
- * submission
+ * Put a reasonable upper limit on the number of IBs that can be
+ * submitted
*/
- if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS)
+ if (param->numibs > 10000) {
+ result = -EINVAL;
goto done;
+ }
- cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
- param->ibdesc_addr, param->numibs, 0, 0);
- } else
- cmdbatch = _kgsl_cmdbatch_create_legacy(device, context, param);
+ ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
+ GFP_KERNEL);
+ if (!ibdesc) {
+			KGSL_MEM_ERR(dev_priv->device,
+				"kzalloc(%zu) failed\n",
+				sizeof(struct kgsl_ibdesc) * param->numibs);
+ result = -ENOMEM;
+ goto done;
+ }
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
+ sizeof(struct kgsl_ibdesc) * param->numibs)) {
+ result = -EFAULT;
+ KGSL_DRV_ERR(dev_priv->device,
+ "copy_from_user failed\n");
+ goto free_ibdesc;
+ }
+ } else {
+		KGSL_DRV_INFO(dev_priv->device,
+			"Using legacy single IB submission mode\n");
+		/* If the user space driver is still using the old mode of
+		 * submitting a single IB then we need to support that as
+		 * well */
+ ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
+ if (!ibdesc) {
+			KGSL_MEM_ERR(dev_priv->device,
+				"kzalloc(%zu) failed\n",
+				sizeof(struct kgsl_ibdesc));
+ result = -ENOMEM;
+ goto done;
+ }
+ ibdesc[0].gpuaddr = param->ibdesc_addr;
+ ibdesc[0].sizedwords = param->numibs;
+ param->numibs = 1;
}
- /* Run basic sanity checking on the command */
- if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch))
- goto free_cmdbatch;
+ for (i = 0; i < param->numibs; i++) {
+ struct kgsl_pagetable *pt = dev_priv->process_priv->pagetable;
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
-		cmdbatch, &param->timestamp);
-
-free_cmdbatch:
- if (result)
- kgsl_cmdbatch_destroy(cmdbatch);
-
-done:
- kgsl_context_put(context);
- return result;
-}
-
-static long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
- unsigned int cmd, void *data)
-{
- struct kgsl_submit_commands *param = data;
- struct kgsl_device *device = dev_priv->device;
- struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch;
-
- long result = -EINVAL;
-
- /* The number of IBs are completely ignored for sync commands */
- if (!(param->flags & KGSL_CONTEXT_SYNC)) {
- if (param->numcmds == 0 || param->numcmds > KGSL_MAX_NUMIBS)
- return -EINVAL;
- } else if (param->numcmds != 0) {
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
+ if (!kgsl_mmu_gpuaddr_in_range(pt, ibdesc[i].gpuaddr)) {
+ result = -ERANGE;
+ KGSL_DRV_ERR(dev_priv->device,
+ "invalid ib base GPU virtual addr %x\n",
+ ibdesc[i].gpuaddr);
+ goto free_ibdesc;
+ }
}
- context = kgsl_context_get_owner(dev_priv, param->context_id);
- if (context == NULL)
- return -EINVAL;
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv,
+ context,
+ ibdesc,
+ param->numibs,
+						&param->timestamp,
+ param->flags);
- cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
- (unsigned int) param->cmdlist, param->numcmds,
- (unsigned int) param->synclist, param->numsyncs);
-
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
- }
-
- /* Run basic sanity checking on the command */
- if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch))
- goto free_cmdbatch;
-
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
-		cmdbatch, &param->timestamp);
-
-free_cmdbatch:
- if (result)
- kgsl_cmdbatch_destroy(cmdbatch);
-
+free_ibdesc:
+ kfree(ibdesc);
done:
kgsl_context_put(context);
return result;
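
From userspace, the list submission mode the ioctl now handles looks roughly like this. This is a hedged sketch assuming the msm_kgsl.h UAPI header and a 32-bit build where the user pointer fits in ibdesc_addr; in legacy mode, ibdesc_addr instead carries the GPU address of the single IB and numibs its size in dwords, matching the fallback branch above.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>	/* kgsl_ringbuffer_issueibcmds, kgsl_ibdesc */

static int submit_ib_list(int fd, unsigned int ctxt_id,
			  struct kgsl_ibdesc *ibs, unsigned int count)
{
	struct kgsl_ringbuffer_issueibcmds param;

	memset(&param, 0, sizeof(param));
	param.drawctxt_id = ctxt_id;
	param.flags = KGSL_CONTEXT_SUBMIT_IB_LIST;	/* list mode */
	param.ibdesc_addr = (uintptr_t) ibs;		/* user pointer */
	param.numibs = count;				/* list length */

	return ioctl(fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &param);
}
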
@@ -2143,11 +1665,14 @@
{
struct kgsl_drawctxt_destroy *param = data;
struct kgsl_context *context;
- long result;
+ long result = -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
- result = kgsl_context_detach(context);
+ if (context) {
+ kgsl_context_detach(context);
+ result = 0;
+ }
kgsl_context_put(context);
return result;
@@ -3246,7 +2771,7 @@
} kgsl_ioctl_funcs[] = {
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
kgsl_ioctl_device_getproperty,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
kgsl_ioctl_device_waittimestamp,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
@@ -3254,9 +2779,8 @@
kgsl_ioctl_device_waittimestamp_ctxtid,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
- kgsl_ioctl_rb_issueibcmds, 0),
- KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
- kgsl_ioctl_submit_commands, 0),
+ kgsl_ioctl_rb_issueibcmds,
+ KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
kgsl_ioctl_cmdstream_readtimestamp,
KGSL_IOCTL_LOCK),
@@ -3265,13 +2789,13 @@
KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
kgsl_ioctl_cmdstream_freememontimestamp,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
kgsl_ioctl_drawctxt_create,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
kgsl_ioctl_drawctxt_destroy,
KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
@@ -3291,10 +2815,10 @@
kgsl_ioctl_cff_user_event, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
kgsl_ioctl_timestamp_event,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
kgsl_ioctl_device_setproperty,
- KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_LOCK),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
kgsl_ioctl_gpumem_alloc_id, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
@@ -3938,6 +3462,7 @@
setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
+ setup_timer(&device->hang_timer, hang_timer, (unsigned long) device);
status = kgsl_create_device_workqueue(device);
if (status)
goto error_pwrctrl_close;
@@ -3997,6 +3522,7 @@
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device);
+
}
if (device->pm_dump_enable) {
@@ -4010,12 +3536,13 @@
pwr->power_flags, pwr->active_pwrlevel);
KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
- pwr->interval_timeout);
+ pwr->interval_timeout);
}
/* Disable the idle timer so we don't get interrupted */
del_timer_sync(&device->idle_timer);
+ del_timer_sync(&device->hang_timer);
/* Force on the clocks */
kgsl_pwrctrl_wake(device);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 2e9d52e..8d390a9 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -78,8 +78,6 @@
#define KGSL_MEMFREE_HIST_SIZE ((int)(PAGE_SIZE * 2))
-#define KGSL_MAX_NUMIBS 100000
-
struct kgsl_memfree_hist_elem {
unsigned int pid;
unsigned int gpuaddr;
@@ -143,7 +141,6 @@
struct kgsl_pagetable;
struct kgsl_memdesc;
-struct kgsl_cmdbatch;
struct kgsl_memdesc_ops {
int (*vmflags)(struct kgsl_memdesc *);
@@ -208,6 +205,7 @@
#define MMU_CONFIG 1
#endif
+void kgsl_hang_check(struct work_struct *work);
void kgsl_mem_entry_destroy(struct kref *kref);
int kgsl_postmortem_dump(struct kgsl_device *device, int manual);
@@ -239,7 +237,7 @@
unsigned int value);
void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_ibdesc *ibdesc, int numibs,
unsigned int timestamp, unsigned int flags,
int result, unsigned int type);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 110264b..9ab8d22 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -123,6 +123,7 @@
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
+KGSL_DEBUGFS_LOG(ft_log);
static int memfree_hist_print(struct seq_file *s, void *unused)
{
@@ -184,6 +185,7 @@
device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
+ device->ft_log = KGSL_LOG_LEVEL_DEFAULT;
debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
&cmd_log_fops);
@@ -197,6 +199,8 @@
&pwr_log_fops);
debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
&memfree_hist_fops);
+ debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
+ &ft_log_fops);
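
KGSL_DEBUGFS_LOG() generates the file operations behind these knobs; a plain debugfs integer attribute captures the same read/write-a-log-level idea in miniature (illustrative names, not the macro's actual expansion):

#include <linux/debugfs.h>

static u32 demo_ft_log_level = 3;	/* e.g. the default log level */

static void demo_debugfs_init(struct dentry *parent)
{
	/* 0644: root can tune the level, everyone can read it */
	debugfs_create_u32("log_level_ft", 0644, parent,
			   &demo_ft_log_level);
}
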
/* Create postmortem dump control files */
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index cd9c4f7..09a31c9 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -13,7 +13,6 @@
#ifndef __KGSL_DEVICE_H
#define __KGSL_DEVICE_H
-#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pm_qos.h>
#include <linux/sched.h>
@@ -77,7 +76,6 @@
struct kgsl_context;
struct kgsl_power_stats;
struct kgsl_event;
-struct kgsl_cmdbatch;
struct kgsl_functable {
/* Mandatory functions - these functions must be implemented
@@ -89,7 +87,7 @@
void (*regwrite) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int value);
int (*idle) (struct kgsl_device *device);
- bool (*isidle) (struct kgsl_device *device);
+ unsigned int (*isidle) (struct kgsl_device *device);
int (*suspend_context) (struct kgsl_device *device);
int (*init) (struct kgsl_device *device);
int (*start) (struct kgsl_device *device);
@@ -103,8 +101,9 @@
unsigned int (*readtimestamp) (struct kgsl_device *device,
struct kgsl_context *context, enum kgsl_timestamp_type type);
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamps);
+ struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
+ unsigned int sizedwords, uint32_t *timestamp,
+ unsigned int flags);
int (*setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*cleanup_pt)(struct kgsl_device *device,
@@ -116,15 +115,14 @@
void * (*snapshot)(struct kgsl_device *device, void *snapshot,
int *remain, int hang);
irqreturn_t (*irq_handler)(struct kgsl_device *device);
- int (*drain)(struct kgsl_device *device);
/* Optional functions - these functions are not mandatory. The
driver will check that the function pointer is not NULL before
calling the hook */
- int (*setstate) (struct kgsl_device *device, unsigned int context_id,
+ void (*setstate) (struct kgsl_device *device, unsigned int context_id,
uint32_t flags);
struct kgsl_context *(*drawctxt_create) (struct kgsl_device_private *,
uint32_t *flags);
- int (*drawctxt_detach) (struct kgsl_context *context);
+ void (*drawctxt_detach) (struct kgsl_context *context);
void (*drawctxt_destroy) (struct kgsl_context *context);
long (*ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
@@ -134,8 +132,6 @@
int (*postmortem_dump) (struct kgsl_device *device, int manual);
int (*next_event)(struct kgsl_device *device,
struct kgsl_event *event);
- void (*drawctxt_sched)(struct kgsl_device *device,
- struct kgsl_context *context);
};
/* MH register values */
@@ -159,56 +155,6 @@
unsigned int created;
};
-/**
- * struct kgsl_cmdbatch - KGSl command descriptor
- * @device: KGSL GPU device that the command was created for
- * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command
- * @flags: flags
- * @priv: Internal flags
- * @fault_policy: Internal policy describing how to handle this command in case
- * of a fault
- * @fault_recovery: recovery actions actually tried for this batch
- * @ibcount: Number of IBs in the command list
- * @ibdesc: Pointer to the list of IBs
- * @expires: Point in time when the cmdbatch is considered to be hung
- * @invalid: non-zero if the dispatcher determines the command and the owning
- * context should be invalidated
- * @refcount: kref structure to maintain the reference count
- * @synclist: List of context/timestamp tuples to wait for before issuing
- *
- * This struture defines an atomic batch of command buffers issued from
- * userspace.
- */
-struct kgsl_cmdbatch {
- struct kgsl_device *device;
- struct kgsl_context *context;
- spinlock_t lock;
- uint32_t timestamp;
- uint32_t flags;
- unsigned long priv;
- unsigned long fault_policy;
- unsigned long fault_recovery;
- uint32_t ibcount;
- struct kgsl_ibdesc *ibdesc;
- unsigned long expires;
- int invalid;
- struct kref refcount;
- struct list_head synclist;
-};
-
-/**
- * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
- * @CMDBATCH_FLAG_SKIP - skip the entire command batch
- * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
- * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
- */
-
-enum kgsl_cmdbatch_priv {
- CMDBATCH_FLAG_SKIP = 0,
- CMDBATCH_FLAG_FORCE_PREAMBLE,
- CMDBATCH_FLAG_WFI,
-};
struct kgsl_device {
struct device *dev;
@@ -244,7 +190,9 @@
struct completion hwaccess_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
+ struct work_struct hang_check_ws;
struct timer_list idle_timer;
+ struct timer_list hang_timer;
struct kgsl_pwrctrl pwrctrl;
int open_count;
@@ -258,6 +206,7 @@
wait_queue_head_t active_cnt_wq;
struct workqueue_struct *work_queue;
struct device *parentdev;
+ struct completion ft_gate;
struct dentry *d_debugfs;
struct idr context_idr;
rwlock_t context_lock;
@@ -284,13 +233,13 @@
int drv_log;
int mem_log;
int pwr_log;
+ int ft_log;
int pm_dump_enable;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
struct work_struct ts_expired_ws;
struct list_head events;
struct list_head events_pending_list;
- unsigned int events_last_timestamp;
s64 on_time;
/* Postmortem Control switches */
@@ -305,8 +254,11 @@
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
+ .ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
+ .hang_check_ws = __WORK_INITIALIZER((_dev).hang_check_ws,\
+ kgsl_hang_check),\
.ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
kgsl_process_events),\
.context_idr = IDR_INIT((_dev).context_idr),\
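
These initializer macros wire up statically declared devices at compile time, so hang_check_ws is bound to kgsl_hang_check() and ft_gate is usable before any runtime init code runs. The idiom in isolation, with illustrative names:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct demo_device {
	struct completion gate;
	struct work_struct check_ws;
};

static void demo_check(struct work_struct *work);

#define DEMO_DEVICE_INIT(_dev) \
	.gate = COMPLETION_INITIALIZER((_dev).gate), \
	.check_ws = __WORK_INITIALIZER((_dev).check_ws, demo_check)

static struct demo_device demo = {
	DEMO_DEVICE_INIT(demo),
};

static void demo_check(struct work_struct *work)
{
	struct demo_device *dev = container_of(work, struct demo_device,
						check_ws);
	complete_all(&dev->gate);
}
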
@@ -401,9 +353,6 @@
int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
kgsl_event_func func, void *priv, void *owner);
-void kgsl_cancel_event(struct kgsl_device *device, struct kgsl_context *context,
- unsigned int timestamp, kgsl_event_func func, void *priv);
-
static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
unsigned int type, size_t size)
{
@@ -491,6 +440,8 @@
return 0;
}
+
+
int kgsl_check_timestamp(struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp);
@@ -646,40 +597,4 @@
{
kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED);
}
-
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy_object(struct kref *kref);
-
-/**
- * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
- * @cmdbatch: Pointer to the command batch object
- */
-static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
-{
- if (cmdbatch)
- kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
-}
-
-/**
- * kgsl_cmdbatch_sync_pending() - return true if the cmdbatch is waiting
- * @cmdbatch: Pointer to the command batch object to check
- *
- * Return non-zero if the specified command batch is still waiting for sync
- * point dependencies to be satisfied
- */
-static inline int kgsl_cmdbatch_sync_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- int ret;
-
- if (cmdbatch == NULL)
- return 0;
-
- spin_lock(&cmdbatch->lock);
- ret = list_empty(&cmdbatch->synclist) ? 0 : 1;
- spin_unlock(&cmdbatch->lock);
-
- return ret;
-}
-
#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index c7ac0ad..9e8f6d0 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -48,8 +48,7 @@
{
int id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
- trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created,
- event->func);
+ trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created);
if (event->func)
event->func(device, event->priv, id, timestamp, type);
@@ -236,7 +235,7 @@
*/
if (timestamp_cmp(cur_ts, ts) >= 0) {
- trace_kgsl_fire_event(id, cur_ts, ts, 0, func);
+ trace_kgsl_fire_event(id, cur_ts, ts, 0);
func(device, priv, id, ts, KGSL_EVENT_TIMESTAMP_RETIRED);
kgsl_context_put(context);
@@ -253,7 +252,7 @@
* Increase the active count on the device to avoid going into power
* saving modes while events are pending
*/
- ret = kgsl_active_count_get_light(device);
+ ret = kgsl_active_count_get(device);
if (ret < 0) {
kgsl_context_put(context);
kfree(event);
@@ -267,7 +266,7 @@
event->owner = owner;
event->created = jiffies;
- trace_kgsl_register_event(id, ts, func);
+ trace_kgsl_register_event(id, ts);
/* Add the event to either the owning context or the global list */
@@ -334,11 +333,7 @@
void *priv)
{
struct kgsl_event *event;
- struct list_head *head;
-
- BUG_ON(!mutex_is_locked(&device->mutex));
-
- head = _get_list_head(device, context);
+ struct list_head *head = _get_list_head(device, context);
event = _find_event(device, head, timestamp, func, priv);
@@ -405,19 +400,10 @@
struct kgsl_context *context, *tmp;
uint32_t timestamp;
- /*
- * Bail unless the global timestamp has advanced. We can safely do this
- * outside of the mutex for speed
- */
-
- timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
- if (timestamp == device->events_last_timestamp)
- return;
-
mutex_lock(&device->mutex);
- device->events_last_timestamp = timestamp;
-
+ /* Process expired global events */
+ timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
_retire_events(device, &device->events, timestamp);
_mark_next_event(device, &device->events);
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 2634e4f..68052b1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -482,17 +482,15 @@
return NULL;
}
-static int kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
+static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
struct kgsl_gpummu_pt *gpummu_pt;
if (!kgsl_mmu_enabled())
- return 0;
+ return;
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- int ret = kgsl_idle(mmu->device);
- if (ret)
- return ret;
+ kgsl_idle(mmu->device);
gpummu_pt = mmu->hwpagetable->priv;
kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
@@ -502,16 +500,12 @@
/* Invalidate all and tc */
kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
}
-
- return 0;
}
-static int kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
+static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
- int ret = 0;
-
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
@@ -524,13 +518,10 @@
kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
/* call device specific set page table */
- ret = kgsl_setstate(mmu, context_id,
- KGSL_MMUFLAGS_TLBFLUSH |
+ kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
-
- return ret;
}
static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
@@ -572,7 +563,6 @@
struct kgsl_device *device = mmu->device;
struct kgsl_gpummu_pt *gpummu_pt;
- int ret;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
@@ -584,6 +574,9 @@
/* setup MMU and sub-client behavior */
kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
+ /* idle device */
+ kgsl_idle(device);
+
/* enable axi interrupts */
kgsl_regwrite(device, MH_INTERRUPT_MASK,
GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
@@ -614,12 +607,10 @@
kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
+ kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
+ mmu->flags |= KGSL_FLAGS_STARTED;
- ret = kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
- if (!ret)
- mmu->flags |= KGSL_FLAGS_STARTED;
-
- return ret;
+ return 0;
}
static int
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index e296784..ecda5a7 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -423,12 +423,8 @@
* the GPU and trigger a snapshot. To stall the transaction return
* EBUSY error.
*/
- if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
- /* turn off GPU IRQ so we don't get faults from it too */
- kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
- adreno_dispatcher_irq_fault(device);
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
ret = -EBUSY;
- }
done:
return ret;
}
@@ -1209,12 +1205,10 @@
return 0;
}
-static int kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
- int ret = 0;
-
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
@@ -1225,12 +1219,10 @@
flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
mmu->device->id) |
KGSL_MMUFLAGS_TLBFLUSH;
- ret = kgsl_setstate(mmu, context_id,
+ kgsl_setstate(mmu, context_id,
KGSL_MMUFLAGS_PTUPDATE | flags);
}
}
-
- return ret;
}
/*
@@ -1900,40 +1892,31 @@
* cpu
* Return - void
*/
-static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
struct kgsl_iommu *iommu = mmu->priv;
int temp;
int i;
- int ret = 0;
phys_addr_t pt_base = kgsl_iommu_get_pt_base_addr(mmu,
mmu->hwpagetable);
phys_addr_t pt_val;
- ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
-
- if (ret) {
+ if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
- return ret;
+ return;
}
/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
- if (msm_soc_version_supports_iommu_v0()) {
- ret = kgsl_idle(mmu->device);
- if (ret)
- return ret;
- }
+ if (msm_soc_version_supports_iommu_v0())
+ kgsl_idle(mmu->device);
/* Acquire GPU-CPU sync Lock here */
_iommu_lock();
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
- if (!msm_soc_version_supports_iommu_v0()) {
- ret = kgsl_idle(mmu->device);
- if (ret)
- goto unlock;
- }
+ if (!msm_soc_version_supports_iommu_v0())
+ kgsl_idle(mmu->device);
for (i = 0; i < iommu->unit_count; i++) {
/* get the lsb value which should not change when
* changing ttbr0 */
@@ -1994,13 +1977,12 @@
}
}
}
-unlock:
+
/* Release GPU-CPU sync Lock here */
_iommu_unlock();
/* Disable smmu clock */
kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
- return ret;
}
/*
@@ -2057,7 +2039,6 @@
.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
- .mmu_disable_clk = kgsl_iommu_disable_clk,
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
.mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0,
.mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index 3a32953..a7832e4 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -103,6 +103,15 @@
#define KGSL_PWR_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_FT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->ft_log, fmt, ##args)
+#define KGSL_FT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ft_log, fmt, ##args)
+
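
Call sites for the new macros look identical to the existing CMD/CTXT/MEM/PWR loggers, just gated on the device's ft_log level; an illustrative (hypothetical) call site:

static void demo_report_ft(struct kgsl_device *device, unsigned int offset)
{
	KGSL_FT_INFO(device, "Turned preamble on at offset 0x%x\n", offset);
	KGSL_FT_ERR(device, "fault tolerance failed, restarting GPU\n");
}
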
/* Core error messages - these are for core KGSL functions that have
no device associated with them (such as memory) */
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 6635a7c..952019f 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -566,7 +566,7 @@
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
-int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags)
{
struct kgsl_device *device = mmu->device;
@@ -574,16 +574,14 @@
if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
&& !adreno_is_a2xx(adreno_dev))
- return 0;
+ return;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
- return 0;
+ return;
else if (device->ftbl->setstate)
- return device->ftbl->setstate(device, context_id, flags);
+ device->ftbl->setstate(device, context_id, flags);
else if (mmu->mmu_ops->mmu_device_setstate)
- return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
-
- return 0;
+ mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
EXPORT_SYMBOL(kgsl_setstate);
@@ -592,6 +590,7 @@
struct kgsl_mh *mh = &device->mh;
/* force mmu off for now */
kgsl_regwrite(device, MH_MMU_CONFIG, 0);
+ kgsl_idle(device);
/* define physical memory range accessible by the core */
kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index a30ee3f..faba81e 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -133,10 +133,10 @@
int (*mmu_close) (struct kgsl_mmu *mmu);
int (*mmu_start) (struct kgsl_mmu *mmu);
void (*mmu_stop) (struct kgsl_mmu *mmu);
- int (*mmu_setstate) (struct kgsl_mmu *mmu,
+ void (*mmu_setstate) (struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id);
- int (*mmu_device_setstate) (struct kgsl_mmu *mmu,
+ void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
uint32_t flags);
void (*mmu_pagefault) (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_current_ptbase)
@@ -147,8 +147,6 @@
(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
- void (*mmu_disable_clk)
- (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@@ -233,7 +231,7 @@
int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
uint32_t flags);
int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
phys_addr_t pt_base);
@@ -262,23 +260,19 @@
return 0;
}
-static inline int kgsl_mmu_setstate(struct kgsl_mmu *mmu,
+static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pagetable,
unsigned int context_id)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
- return mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
-
- return 0;
+ mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
}
-static inline int kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
+static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
uint32_t flags)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
- return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
-
- return 0;
+ mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
@@ -326,12 +320,6 @@
return 0;
}
-static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
-{
- if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
- mmu->mmu_ops->mmu_disable_clk(mmu);
-}
-
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
unsigned int ts, bool ts_valid)
{
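With the setstate family converted from int to void across kgsl_mmu.h, call sites shed the error plumbing that almost never fired. An illustrative before/after at a typical caller:

    /* before: status had to be propagated even though it was nearly always 0 */
    ret = kgsl_mmu_device_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
    if (ret)
            return ret;

    /* after: failures are logged inside the op instead of bubbling up */
    kgsl_mmu_device_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);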
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 07131f7..1a95761 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1215,6 +1215,9 @@
} else {
device->pwrctrl.irq_last = 0;
}
+ } else if (device->state & (KGSL_STATE_HUNG |
+ KGSL_STATE_DUMP_AND_FT)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
}
mutex_unlock(&device->mutex);
@@ -1270,6 +1273,7 @@
kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
return -EBUSY;
}
+ del_timer_sync(&device->hang_timer);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
@@ -1339,6 +1343,7 @@
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
del_timer_sync(&device->idle_timer);
+ del_timer_sync(&device->hang_timer);
/* make sure power is on to stop the device*/
kgsl_pwrctrl_enable(device);
device->ftbl->suspend_context(device);
@@ -1430,6 +1435,8 @@
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ mod_timer(&device->hang_timer,
+ (jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART)));
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
device->pwrctrl.pm_qos_latency);
case KGSL_STATE_ACTIVE:
@@ -1497,6 +1504,10 @@
return "SLEEP";
case KGSL_STATE_SUSPEND:
return "SUSPEND";
+ case KGSL_STATE_HUNG:
+ return "HUNG";
+ case KGSL_STATE_DUMP_AND_FT:
+ return "DNR";
case KGSL_STATE_SLUMBER:
return "SLUMBER";
default:
@@ -1528,6 +1539,7 @@
(device->state != KGSL_STATE_ACTIVE)) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
+ wait_for_completion(&device->ft_gate);
mutex_lock(&device->mutex);
/* Stop the idle timer */
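The hang timer reintroduced in kgsl_pwrctrl.c follows a strict pairing: armed whenever the device transitions to ACTIVE, cancelled before NAP and suspend, so it can only fire while submitted work could actually hang. In sketch form, assuming KGSL_TIMEOUT_PART is the per-check interval in milliseconds:

    /* on wake / transition to KGSL_STATE_ACTIVE */
    mod_timer(&device->hang_timer,
              jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART));

    /* before entering KGSL_STATE_NAP or suspending */
    del_timer_sync(&device->hang_timer);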
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index dc3ad21..b7d7235 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -11,7 +11,6 @@
*
*/
-#include <linux/err.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -282,65 +281,3 @@
{
sync_timeline_destroy(context->timeline);
}
-
-static void kgsl_sync_callback(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- struct kgsl_sync_fence_waiter *kwaiter =
- (struct kgsl_sync_fence_waiter *) waiter;
- kwaiter->func(kwaiter->priv);
- sync_fence_put(kwaiter->fence);
- kfree(kwaiter);
-}
-
-struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv)
-{
- struct kgsl_sync_fence_waiter *kwaiter;
- struct sync_fence *fence;
- int status;
-
- fence = sync_fence_fdget(fd);
- if (fence == NULL)
- return ERR_PTR(-EINVAL);
-
- /* create the waiter */
- kwaiter = kzalloc(sizeof(*kwaiter), GFP_ATOMIC);
- if (kwaiter == NULL) {
- sync_fence_put(fence);
- return ERR_PTR(-ENOMEM);
- }
- kwaiter->fence = fence;
- kwaiter->priv = priv;
- kwaiter->func = func;
- sync_fence_waiter_init((struct sync_fence_waiter *) kwaiter,
- kgsl_sync_callback);
-
- /* if status then error or signaled */
- status = sync_fence_wait_async(fence,
- (struct sync_fence_waiter *) kwaiter);
- if (status) {
- kfree(kwaiter);
- sync_fence_put(fence);
- if (status < 0)
- kwaiter = ERR_PTR(status);
- else
- kwaiter = NULL;
- }
-
- return kwaiter;
-}
-
-int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *kwaiter)
-{
- if (kwaiter == NULL)
- return 0;
-
- if (sync_fence_cancel_async(kwaiter->fence,
- (struct sync_fence_waiter *) kwaiter) == 0) {
- sync_fence_put(kwaiter->fence);
- kfree(kwaiter);
- return 1;
- }
- return 0;
-}
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 275eaf0..63adf06 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,13 +28,6 @@
unsigned int timestamp;
};
-struct kgsl_sync_fence_waiter {
- struct sync_fence_waiter waiter;
- struct sync_fence *fence;
- void (*func)(void *priv);
- void *priv;
-};
-
#if defined(CONFIG_SYNC)
struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline,
unsigned int timestamp);
@@ -46,9 +39,6 @@
void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
unsigned int timestamp);
void kgsl_sync_timeline_destroy(struct kgsl_context *context);
-struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv);
-int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter);
#else
static inline struct sync_pt
*kgsl_sync_pt_create(struct sync_timeline *timeline, unsigned int timestamp)
@@ -82,20 +72,6 @@
static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context)
{
}
-
-static inline struct
-kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
- void (*func)(void *priv), void *priv)
-{
- return NULL;
-}
-
-static inline int
-kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter)
-{
- return 1;
-}
-
#endif
#endif /* __KGSL_SYNC_H */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 5f39b8b..f16f2b4 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -37,13 +37,14 @@
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_ibdesc *ibdesc,
+ int numibs,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, cmdbatch, timestamp, flags,
+ TP_ARGS(device, drawctxt_id, ibdesc, numibs, timestamp, flags,
result, type),
TP_STRUCT__entry(
@@ -60,8 +61,8 @@
TP_fast_assign(
__assign_str(device_name, device->name);
__entry->drawctxt_id = drawctxt_id;
- __entry->ibdesc_addr = cmdbatch->ibdesc[0].gpuaddr;
- __entry->numibs = cmdbatch->ibcount;
+ __entry->ibdesc_addr = ibdesc[0].gpuaddr;
+ __entry->numibs = numibs;
__entry->timestamp = timestamp;
__entry->flags = flags;
__entry->result = result;
@@ -730,46 +731,42 @@
);
TRACE_EVENT(kgsl_register_event,
- TP_PROTO(unsigned int id, unsigned int timestamp, void *func),
- TP_ARGS(id, timestamp, func),
+ TP_PROTO(unsigned int id, unsigned int timestamp),
+ TP_ARGS(id, timestamp),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
- __field(void *, func)
),
TP_fast_assign(
__entry->id = id;
__entry->timestamp = timestamp;
- __entry->func = func;
),
TP_printk(
- "ctx=%u ts=%u cb=%pF",
- __entry->id, __entry->timestamp, __entry->func)
+ "ctx=%u ts=%u",
+ __entry->id, __entry->timestamp)
);
TRACE_EVENT(kgsl_fire_event,
TP_PROTO(unsigned int id, unsigned int ts,
- unsigned int type, unsigned int age, void *func),
- TP_ARGS(id, ts, type, age, func),
+ unsigned int type, unsigned int age),
+ TP_ARGS(id, ts, type, age),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, ts)
__field(unsigned int, type)
__field(unsigned int, age)
- __field(void *, func)
),
TP_fast_assign(
__entry->id = id;
__entry->ts = ts;
__entry->type = type;
__entry->age = age;
- __entry->func = func;
),
TP_printk(
- "ctx=%u ts=%u type=%s age=%u cb=%pF",
+ "ctx=%u ts=%u type=%s age=%u",
__entry->id, __entry->ts,
__print_symbolic(__entry->type, KGSL_EVENT_TYPES),
- __entry->age, __entry->func)
+ __entry->age)
);
TRACE_EVENT(kgsl_active_count,
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 0af57aa..883417f 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -353,13 +353,7 @@
return ts_diff < Z180_PACKET_COUNT;
}
-/**
- * z180_idle() - Idle the 2D device
- * @device: Pointer to the KGSL device struct for the Z180
- *
- * wait until the z180 submission queue is idle
- */
-int z180_idle(struct kgsl_device *device)
+static int z180_idle(struct kgsl_device *device)
{
int status = 0;
struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -379,8 +373,10 @@
int
z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
+ struct kgsl_ibdesc *ibdesc,
+ unsigned int numibs,
+ uint32_t *timestamp,
+ unsigned int ctrl)
{
long result = 0;
unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
@@ -393,20 +389,6 @@
struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
struct z180_device *z180_dev = Z180_DEVICE(device);
unsigned int sizedwords;
- unsigned int numibs;
- struct kgsl_ibdesc *ibdesc;
-
- mutex_lock(&device->mutex);
-
- kgsl_active_count_get(device);
-
- if (cmdbatch == NULL) {
- result = EINVAL;
- goto error;
- }
-
- ibdesc = cmdbatch->ibdesc;
- numibs = cmdbatch->ibcount;
if (device->state & KGSL_STATE_HUNG) {
result = -EINVAL;
@@ -448,7 +430,7 @@
context->id, cmd, sizedwords);
/* context switch */
if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
- (cmdbatch->flags & KGSL_CONTEXT_CTX_SWITCH)) {
+ (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
KGSL_CMD_INFO(device, "context switch %d -> %d\n",
context->id, z180_dev->ringbuffer.prevctx);
kgsl_mmu_setstate(&device->mmu, pagetable,
@@ -456,13 +438,10 @@
cnt = PACKETSIZE_STATESTREAM;
ofs = 0;
}
-
- result = kgsl_setstate(&device->mmu,
+ kgsl_setstate(&device->mmu,
KGSL_MEMSTORE_GLOBAL,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
- if (result < 0)
- goto error;
result = wait_event_interruptible_timeout(device->wait_queue,
room_in_rb(z180_dev),
@@ -503,12 +482,9 @@
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
error:
- kgsl_trace_issueibcmds(device, context->id, cmdbatch,
- *timestamp, cmdbatch->flags, result, 0);
- kgsl_active_count_put(device);
-
- mutex_unlock(&device->mutex);
+ kgsl_trace_issueibcmds(device, context->id, ibdesc, numibs,
+ *timestamp, ctrl, result, 0);
return (int)result;
}
@@ -619,12 +595,8 @@
static int z180_stop(struct kgsl_device *device)
{
- int ret;
-
device->ftbl->irqctrl(device, 0);
- ret = z180_idle(device);
- if (ret)
- return ret;
+ z180_idle(device);
del_timer_sync(&device->idle_timer);
@@ -690,7 +662,7 @@
return status;
}
-static bool z180_isidle(struct kgsl_device *device)
+static unsigned int z180_isidle(struct kgsl_device *device)
{
struct z180_device *z180_dev = Z180_DEVICE(device);
@@ -903,7 +875,7 @@
return context;
}
-static int
+static void
z180_drawctxt_detach(struct kgsl_context *context)
{
struct kgsl_device *device;
@@ -917,13 +889,9 @@
if (z180_dev->ringbuffer.prevctx == context->id) {
z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
device->mmu.hwpagetable = device->mmu.defaultpagetable;
-
- /* Ignore the result - we are going down anyway */
kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
KGSL_MMUFLAGS_PTUPDATE);
}
-
- return 0;
}
static void
@@ -997,7 +965,6 @@
.irqctrl = z180_irqctrl,
.gpuid = z180_gpuid,
.irq_handler = z180_irq_handler,
- .drain = z180_idle, /* drain == idle for the z180 */
/* Optional functions */
.drawctxt_create = z180_drawctxt_create,
.drawctxt_detach = z180_drawctxt_detach,
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index a36e92d..1be0870 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -45,6 +45,5 @@
};
int z180_dump(struct kgsl_device *, int);
-int z180_idle(struct kgsl_device *);
#endif /* __Z180_H */
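With the cmdbatch plumbing reverted, z180 submission again takes the IB list, count, and flags directly. A call-site sketch; the variable names are illustrative:

    /* ctrl carries the KGSL_CONTEXT_* flags formerly read from cmdbatch->flags */
    ret = z180_cmdstream_issueibcmds(dev_priv, context, ibdesc, numibs,
                                     &timestamp, ctrl);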
diff --git a/drivers/gpu/msm/z180_postmortem.c b/drivers/gpu/msm/z180_postmortem.c
index bc53c0e..5d929cf 100644
--- a/drivers/gpu/msm/z180_postmortem.c
+++ b/drivers/gpu/msm/z180_postmortem.c
@@ -58,8 +58,6 @@
unsigned int i;
unsigned int reg_val;
- z180_idle(device);
-
KGSL_LOG_DUMP(device, "Z180 Register Dump\n");
for (i = 0; i < ARRAY_SIZE(regs_to_dump); i++) {
kgsl_regread(device,
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index b725200..06ca31c 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -394,7 +394,7 @@
const char *fw_name;
bool no_force_update;
bool lpm_support;
- bool regs_enabled;
+ bool dev_sleep;
#if defined(CONFIG_SECURE_TOUCH)
atomic_t st_enabled;
@@ -2162,11 +2162,6 @@
if (on == false)
goto power_off;
- if (data->regs_enabled) {
- dev_dbg(&data->client->dev, "regs are already enabled\n");
- return 0;
- }
-
rc = reg_set_optimum_mode_check(data->vcc_ana, MXT_ACTIVE_LOAD_UA);
if (rc < 0) {
dev_err(&data->client->dev,
@@ -2215,8 +2210,6 @@
}
}
- data->regs_enabled = true;
-
msleep(130);
return 0;
@@ -2237,12 +2230,6 @@
return rc;
power_off:
-
- if (!data->regs_enabled) {
- dev_dbg(&data->client->dev, "regs are already disabled\n");
- return 0;
- }
-
reg_set_optimum_mode_check(data->vcc_ana, 0);
regulator_disable(data->vcc_ana);
if (data->pdata->digital_pwr_regulator) {
@@ -2254,8 +2241,6 @@
regulator_disable(data->vcc_i2c);
}
- data->regs_enabled = false;
-
msleep(50);
return 0;
}
@@ -2455,6 +2440,11 @@
struct input_dev *input_dev = data->input_dev;
int error;
+ if (data->dev_sleep) {
+ dev_dbg(dev, "Device already in sleep\n");
+ return 0;
+ }
+
disable_irq(data->irq);
mutex_lock(&input_dev->mutex);
@@ -2485,6 +2475,7 @@
}
}
+ data->dev_sleep = true;
return 0;
}
@@ -2495,6 +2486,11 @@
struct input_dev *input_dev = data->input_dev;
int error;
+ if (!data->dev_sleep) {
+ dev_dbg(dev, "Device already in resume\n");
+ return 0;
+ }
+
/* put regulators back in active power mode */
if (data->lpm_support) {
error = mxt_regulator_lpm(data, false);
@@ -2538,6 +2534,7 @@
enable_irq(data->irq);
+ data->dev_sleep = false;
return 0;
}
@@ -2924,6 +2921,7 @@
data->pdata = pdata;
data->no_force_update = pdata->no_force_update;
data->lpm_support = !pdata->no_lpm_support;
+ data->dev_sleep = false;
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(EV_KEY, input_dev->evbit);
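The dev_sleep flag makes the suspend/resume pair idempotent: a repeated call returns early instead of touching the IRQ and regulators twice. The guard pattern in isolation, as a sketch (the drvdata access is abbreviated from the driver's i2c clientdata convention):

    static int example_mxt_suspend(struct device *dev)
    {
            struct mxt_data *data = dev_get_drvdata(dev);

            if (data->dev_sleep)    /* already suspended, nothing to do */
                    return 0;

            /* ... disable IRQ, close input device, drop regulators ... */

            data->dev_sleep = true;
            return 0;
    }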
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index d1fe92b..4450cde 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -1949,7 +1949,7 @@
*******************************************************/
static void goodix_ts_suspend(struct goodix_ts_data *ts)
{
- int ret = -1;
+ int ret = -1, i;
GTP_DEBUG_FUNC();
@@ -1965,6 +1965,13 @@
gtp_irq_disable(ts);
else
hrtimer_cancel(&ts->timer);
+
+ for (i = 0; i < GTP_MAX_TOUCH; i++)
+ gtp_touch_up(ts, i);
+
+ input_report_key(ts->input_dev, BTN_TOUCH, 0);
+ input_sync(ts->input_dev);
+
ret = gtp_enter_sleep(ts);
#endif
if (ret < 0)
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
index bc3b93d..44c9613 100644
--- a/drivers/media/platform/msm/vidc/q6_hfi.c
+++ b/drivers/media/platform/msm/vidc/q6_hfi.c
@@ -381,7 +381,7 @@
}
static inline void q6_hfi_add_apr_hdr(struct q6_hfi_device *dev,
- struct apr_hdr *hdr, u32 pkt_size, u32 opcode)
+ struct apr_hdr *hdr, u32 pkt_size)
{
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(sizeof(struct apr_hdr)),
@@ -394,7 +394,7 @@
hdr->dest_port = 0;
hdr->pkt_size = pkt_size;
hdr->token = 0;
- hdr->opcode = opcode;
+ hdr->opcode = VIDEO_HFI_CMD_ID;
}
static int q6_hfi_apr_callback(struct apr_client_data *data, void *priv)
@@ -496,7 +496,7 @@
goto err_core_init;
}
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr), HFI_CMD_SYS_INIT);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_sys_init(&apr.pkt, HFI_VIDEO_ARCH_OX);
if (rc) {
@@ -529,22 +529,6 @@
return 0;
}
-static int q6_hfi_core_pc_prep(void *device)
-{
- (void) device;
-
- /* Q6 does not support core_pc_prep*/
- return 0;
-}
-
-static int q6_hfi_core_ping(void *device)
-{
- (void) device;
-
- /* Q6 does not support cmd_sys_ping */
- return 0;
-}
-
static void *q6_hfi_session_init(void *device, u32 session_id,
enum hal_domain session_type, enum hal_video_codec codec_type)
{
@@ -567,8 +551,7 @@
new_session->is_decoder = 1;
new_session->device = dev;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr),
- HFI_CMD_SYS_SESSION_INIT);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
if (create_pkt_cmd_sys_session_init(&apr.pkt, (u32)new_session,
session_type, codec_type)) {
@@ -605,7 +588,7 @@
}
dev = session->device;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr), pkt_type);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_session_cmd(&apr.pkt, pkt_type, (u32)session);
if (rc) {
@@ -670,8 +653,7 @@
return 0;
apr = (struct q6_apr_cmd_session_set_buffers_packet *)packet;
- q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE,
- HFI_CMD_SESSION_SET_BUFFERS);
+ q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE);
rc = create_pkt_cmd_session_set_buffers(&apr->pkt,
(u32)session, buffer_info);
@@ -713,8 +695,7 @@
apr = (struct q6_apr_cmd_session_release_buffer_packet *) packet;
- q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE,
- HFI_CMD_SESSION_RELEASE_BUFFERS);
+ q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE);
rc = create_pkt_cmd_session_release_buffers(&apr->pkt,
(u32)session, buffer_info);
@@ -788,8 +769,7 @@
if (session->is_decoder) {
struct q6_apr_cmd_session_empty_buffer_compressed_packet apr;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr),
- HFI_CMD_SESSION_EMPTY_BUFFER);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_session_etb_decoder(&apr.pkt,
(u32)session, input_frame);
@@ -811,8 +791,7 @@
} else {
struct
q6_apr_cmd_session_empty_buffer_uncompressed_plane0_packet apr;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr),
- HFI_CMD_SESSION_EMPTY_BUFFER);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_session_etb_encoder(&apr.pkt,
(u32)session, input_frame);
@@ -848,8 +827,7 @@
}
dev = session->device;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr),
- HFI_CMD_SESSION_FILL_BUFFER);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_session_ftb(&apr.pkt, (u32)session, output_frame);
if (rc) {
@@ -886,8 +864,7 @@
apr = (struct q6_apr_cmd_session_parse_sequence_header_packet *) packet;
- q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_SMALL_PKT_SIZE,
- HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER);
+ q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_SMALL_PKT_SIZE);
rc = create_pkt_cmd_session_parse_seq_header(&apr->pkt,
(u32)session, seq_hdr);
@@ -925,8 +902,7 @@
apr = (struct q6_apr_cmd_session_get_sequence_header_packet *) packet;
- q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_SMALL_PKT_SIZE,
- HFI_CMD_SESSION_GET_SEQUENCE_HEADER);
+ q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_SMALL_PKT_SIZE);
rc = create_pkt_cmd_session_get_seq_hdr(&apr->pkt, (u32)session,
seq_hdr);
@@ -960,8 +936,7 @@
}
dev = session->device;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr),
- HFI_CMD_SESSION_GET_PROPERTY);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_session_get_buf_req(&apr.pkt, (u32)session);
if (rc) {
@@ -993,8 +968,7 @@
}
dev = session->device;
- q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr),
- HFI_CMD_SESSION_FLUSH);
+ q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr));
rc = create_pkt_cmd_session_flush(&apr.pkt, (u32)session, flush_mode);
if (rc) {
@@ -1031,8 +1005,7 @@
dev = session->device;
dprintk(VIDC_DBG, "in set_prop,with prop id: 0x%x", ptype);
- q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE,
- HFI_CMD_SESSION_SET_PROPERTY);
+ q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE);
rc = create_pkt_cmd_session_set_property(&apr->pkt,
(u32)session, ptype, pdata);
@@ -1177,28 +1150,6 @@
return 0;
}
-static int q6_hfi_scale_clocks(void *dev, int load)
-{
- (void)dev;
- (void)load;
-
- /* Q6 does not support clocks scaling */
- return 0;
-}
-
-static int q6_hfi_scale_bus(void *dev, int load,
- enum session_type type, enum mem_type mtype)
-{
- (void)dev;
- (void)load;
- (void)type;
- (void)mtype;
-
- /* Q6 does not support bus scaling */
- return 0;
-
-}
-
static int q6_hfi_unset_ocmem(void *dev)
{
(void)dev;
@@ -1207,23 +1158,6 @@
return -EINVAL;
}
-static int q6_hfi_alloc_ocmem(void *dev, unsigned long size)
-{
- (void)dev;
- (void)size;
-
- /* Q6 does not support ocmem */
- return 0;
-}
-
-static int q6_hfi_free_ocmem(void *dev)
-{
- (void)dev;
-
- /* Q6 does not support ocmem */
- return 0;
-}
-
static int q6_hfi_iommu_get_domain_partition(void *dev, u32 flags,
u32 buffer_type, int *domain, int *partition)
{
@@ -1373,14 +1307,6 @@
}
}
-static int q6_hfi_get_fw_info(void *dev, enum fw_info info)
-{
- (void)dev;
- (void)info;
-
- return 0;
-}
-
static int q6_hfi_get_stride_scanline(int color_fmt,
int width, int height, int *stride, int *scanlines) {
*stride = VENUS_Y_STRIDE(color_fmt, width);
@@ -1392,8 +1318,6 @@
{
hdev->core_init = q6_hfi_core_init;
hdev->core_release = q6_hfi_core_release;
- hdev->core_pc_prep = q6_hfi_core_pc_prep;
- hdev->core_ping = q6_hfi_core_ping;
hdev->session_init = q6_hfi_session_init;
hdev->session_end = q6_hfi_session_end;
hdev->session_abort = q6_hfi_session_abort;
@@ -1414,15 +1338,10 @@
hdev->session_flush = q6_hfi_session_flush;
hdev->session_set_property = q6_hfi_session_set_property;
hdev->session_get_property = q6_hfi_session_get_property;
- hdev->scale_clocks = q6_hfi_scale_clocks;
- hdev->scale_bus = q6_hfi_scale_bus;
hdev->unset_ocmem = q6_hfi_unset_ocmem;
- hdev->alloc_ocmem = q6_hfi_alloc_ocmem;
- hdev->free_ocmem = q6_hfi_free_ocmem;
hdev->iommu_get_domain_partition = q6_hfi_iommu_get_domain_partition;
hdev->load_fw = q6_hfi_load_fw;
hdev->unload_fw = q6_hfi_unload_fw;
- hdev->get_fw_info = q6_hfi_get_fw_info;
hdev->get_stride_scanline = q6_hfi_get_stride_scanline;
}
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.h b/drivers/media/platform/msm/vidc/q6_hfi.h
index 3dc4607..67aed5a 100644
--- a/drivers/media/platform/msm/vidc/q6_hfi.h
+++ b/drivers/media/platform/msm/vidc/q6_hfi.h
@@ -20,6 +20,15 @@
#define Q6_IFACEQ_QUEUE_SIZE (8 * 1024)
+/* client to Q6 communication path : forward path */
+#define VIDEO_HFI_CMD_ID 0x00012ECC
+
+/* Q6 to client ACK msg: reverse path*/
+#define VIDEO_HFI_MSG_ID 0x00012ECD
+
+/* Q6 to client event notifications */
+#define VIDEO_HFI_EVT_ID 0x00012ECE
+
struct q6_resources {
struct msm_vidc_fw fw;
};
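Every forward APR packet now carries the single opcode VIDEO_HFI_CMD_ID; the HFI command type is no longer passed to the header helper and instead lives in the payload built by the create_pkt_cmd_* helpers. A condensed call site following the pattern above:

    q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); /* hdr->opcode = VIDEO_HFI_CMD_ID */
    rc = create_pkt_cmd_session_flush(&apr.pkt, (u32)session, flush_mode);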
diff --git a/drivers/misc/smsc_hub.c b/drivers/misc/smsc_hub.c
index 41d9ff8..0147e66 100644
--- a/drivers/misc/smsc_hub.c
+++ b/drivers/misc/smsc_hub.c
@@ -21,14 +21,12 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/smsc3503.h>
+#include <linux/smsc_hub.h>
#include <linux/module.h>
#include <mach/msm_xo.h>
-#define SMSC3503_I2C_ADDR 0x08
-#define SMSC_GSBI_I2C_BUS_ID 10
-static const unsigned short normal_i2c[] = {
-SMSC3503_I2C_ADDR, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = {
+0, I2C_CLIENT_END };
struct hsic_hub {
struct device *dev;
@@ -111,6 +109,22 @@
return i2c_smbus_write_byte_data(client, reg, (ret & ~value));
}
+static int smsc4604_send_connect_cmd(struct i2c_client *client)
+{
+ u8 buf[3];
+
+ buf[0] = 0xAA;
+ buf[1] = 0x55;
+ buf[2] = 0x00;
+
+ if (i2c_master_send(client, buf, 3) != 3) {
+ dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int i2c_hsic_hub_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -118,21 +132,37 @@
I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
- /* CONFIG_N bit in SP_ILOCK register has to be set before changing
- * other registers to change default configuration of hsic hub.
- */
- hsic_hub_set_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+ switch (smsc_hub->pdata->model_id) {
+ case SMSC3503_ID:
+ /*
+ * CONFIG_N bit in SP_ILOCK register has to be set before
+ * changing other registers to change default configuration
+ * of hsic hub.
+ */
+ hsic_hub_set_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
- /* Can change default configuartion like VID,PID, strings etc
- * by writing new values to hsic hub registers.
- */
- hsic_hub_write_word_data(client, SMSC3503_VENDORID, 0x05C6);
+ /*
+ * Can change default configuration like VID, PID,
+ * strings etc by writing new values to hsic hub registers
+ */
+ hsic_hub_write_word_data(client, SMSC3503_VENDORID, 0x05C6);
- /* CONFIG_N bit in SP_ILOCK register has to be cleared for new
- * values in registers to be effective after writing to
- * other registers.
- */
- hsic_hub_clear_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+ /*
+ * CONFIG_N bit in SP_ILOCK register has to be cleared
+ * for new values in registers to be effective after
+ * writing to other registers.
+ */
+ hsic_hub_clear_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+ break;
+ case SMSC4604_ID:
+ /*
+ * SMSC4604 requires an I2C attach command to be issued
+ * if the I2C bus is connected.
+ */
+ return smsc4604_send_connect_cmd(client);
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -318,6 +348,8 @@
struct smsc_hub_platform_data *msm_hub_dt_to_pdata(
struct platform_device *pdev)
{
+ int rc;
+ u32 temp_val;
struct device_node *node = pdev->dev.of_node;
struct smsc_hub_platform_data *pdata;
@@ -327,6 +359,14 @@
return ERR_PTR(-ENOMEM);
}
+ rc = of_property_read_u32(node, "smsc,model-id", &temp_val);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to read smsc,model-id\n");
+ return ERR_PTR(rc);
+ } else {
+ pdata->model_id = temp_val;
+ }
+
pdata->hub_reset = of_get_named_gpio(node, "smsc,reset-gpio", 0);
if (pdata->hub_reset < 0)
return ERR_PTR(pdata->hub_reset);
@@ -399,18 +439,13 @@
}
gpio_direction_output(pdata->hub_reset, 0);
- /* Hub reset should be asserted for minimum 2microsec
+ /*
+ * Hub reset should be asserted for a minimum of 2 microseconds
* before deasserting.
*/
udelay(5);
gpio_direction_output(pdata->hub_reset, 1);
- ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "failed to add child node, ret=%d\n", ret);
- goto uninit_gpio;
- }
-
if (!IS_ERR(smsc_hub->hub_vbus_reg)) {
ret = regulator_enable(smsc_hub->hub_vbus_reg);
if (ret) {
@@ -436,14 +471,39 @@
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
strlcpy(i2c_info.type, "i2c_hsic_hub", I2C_NAME_SIZE);
+ /* A 250 ms delay is required for the SMSC4604 hub to bring I2C up */
+ msleep(250);
+
+ /* Assign I2C slave address per SMSC model */
+ switch (pdata->model_id) {
+ case SMSC3503_ID:
+ normal_i2c[0] = SMSC3503_I2C_ADDR;
+ break;
+ case SMSC4604_ID:
+ normal_i2c[0] = SMSC4604_I2C_ADDR;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported SMSC model-id\n");
+ i2c_put_adapter(i2c_adap);
+ i2c_del_driver(&hsic_hub_driver);
+ goto uninit_gpio;
+ }
+
smsc_hub->client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c, NULL);
i2c_put_adapter(i2c_adap);
- if (!smsc_hub->client)
- dev_err(&pdev->dev, "failed to connect to smsc_hub"
- "through I2C\n");
i2c_add_fail:
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add child node, ret=%d\n", ret);
+ goto uninit_gpio;
+ }
+
+ if (!smsc_hub->client)
+ dev_err(&pdev->dev,
+ "failed to connect to smsc_hub through I2C\n");
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
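Probe now hinges on the DT-provided model id: it selects the I2C slave address up front and decides whether the 3503-style register configuration or the 4604 attach command runs when the I2C device probes. Condensed, with constants from include/linux/smsc_hub.h:

    switch (pdata->model_id) {
    case SMSC3503_ID:                       /* 3503: SMBus register setup */
            normal_i2c[0] = SMSC3503_I2C_ADDR;      /* 0x08 */
            break;
    case SMSC4604_ID:                       /* 4604: I2C attach command */
            normal_i2c[0] = SMSC4604_I2C_ADDR;      /* 0x2d */
            break;
    default:
            return -EINVAL;
    }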
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
index f2de17d..7f731d2 100644
--- a/drivers/video/msm/mdss/dsi_host_v2.c
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -320,7 +320,7 @@
wmb();
}
-void msm_dsi_set_tx_power_mode(int mode)
+void dsi_set_tx_power_mode(int mode)
{
u32 data;
unsigned char *ctrl_base = dsi_host_private->dsi_base;
diff --git a/drivers/video/msm/mdss/dsi_panel_v2.c b/drivers/video/msm/mdss/dsi_panel_v2.c
index 022d911..641730b 100644
--- a/drivers/video/msm/mdss/dsi_panel_v2.c
+++ b/drivers/video/msm/mdss/dsi_panel_v2.c
@@ -143,6 +143,29 @@
return 0;
}
+static char led_pwm1[2] = {0x51, 0x0}; /* DTYPE_DCS_WRITE1 */
+static struct dsi_cmd_desc backlight_cmd = {
+ DTYPE_DCS_WRITE1, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1};
+
+static void dsi_panel_bklt_dcs(struct mdss_panel_data *pdata, int level)
+{
+ struct mipi_panel_info *mipi;
+
+ mipi = &pdata->panel_info.mipi;
+
+ pr_debug("%s: dcs level=%d\n", __func__, level);
+
+ led_pwm1[1] = (unsigned char)level;
+
+ if (DSI_VIDEO_MODE == mipi->mode) {
+ dsi_set_tx_power_mode(0);
+ dsi_cmds_tx_v2(pdata, &panel_private->dsi_panel_tx_buf,
+ &backlight_cmd,
+ 1);
+ dsi_set_tx_power_mode(1);
+ }
+}
+
void dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
{
if (pdata == NULL) {
@@ -166,13 +189,21 @@
if (enable == 2) {
dsi_panel_power(1);
gpio_request(panel_private->rst_gpio, "panel_reset");
+ gpio_set_value(panel_private->rst_gpio, 1);
if (gpio_is_valid(panel_private->disp_en_gpio)) {
gpio_request(panel_private->disp_en_gpio,
"panel_enable");
+ gpio_set_value(panel_private->disp_en_gpio, 1);
}
if (gpio_is_valid(panel_private->video_mode_gpio)) {
gpio_request(panel_private->video_mode_gpio,
"panel_video_mdoe");
+ if (pdata->panel_info.mipi.mode == DSI_VIDEO_MODE)
+ gpio_set_value(panel_private->video_mode_gpio,
+ 1);
+ else
+ gpio_set_value(panel_private->video_mode_gpio,
+ 0);
}
if (gpio_is_valid(panel_private->te_gpio))
gpio_request(panel_private->te_gpio, "panel_te");
@@ -233,6 +264,10 @@
led_trigger_event(bl_led_trigger, bl_level);
break;
+ case BL_DCS_CMD:
+ dsi_panel_bklt_dcs(pdata, bl_level);
+ break;
+
default:
pr_err("%s: Unknown bl_ctrl configuration\n",
__func__);
@@ -605,8 +640,11 @@
led_trigger_register_simple("bkl-trigger", &bl_led_trigger);
pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", __func__);
*bl_ctrl = BL_WLED;
+ } else if ((bl_ctrl_type) && (!strncmp(bl_ctrl_type,
+ "bl_ctrl_dcs", 11))) {
+ pr_debug("%s: SUCCESS-> DCS COMMAND register\n", __func__);
+ *bl_ctrl = BL_DCS_CMD;
}
-
rc = of_property_read_u32_array(pdev->dev.of_node,
"qcom,mdss-pan-bl-levels", res, 2);
panel_data->panel_info.bl_min = (!rc ? res[0] : 0);
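Selecting the "bl_ctrl_dcs" control string in the panel devicetree (the property name follows the existing WLED path above) routes backlight updates through dsi_panel_bklt_dcs(), which sends DCS command 0x51 (set display brightness) with the level byte. In video mode the command-engine TX power mode is toggled around the write; the core of that path, condensed:

    led_pwm1[1] = (unsigned char)level;     /* payload: 0x51 <level> */
    dsi_set_tx_power_mode(0);               /* switch command TX power mode */
    dsi_cmds_tx_v2(pdata, &panel_private->dsi_panel_tx_buf, &backlight_cmd, 1);
    dsi_set_tx_power_mode(1);               /* restore */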
diff --git a/drivers/video/msm/mdss/dsi_v2.h b/drivers/video/msm/mdss/dsi_v2.h
index 96dd390..73df790 100644
--- a/drivers/video/msm/mdss/dsi_v2.h
+++ b/drivers/video/msm/mdss/dsi_v2.h
@@ -235,4 +235,6 @@
int dsi_long_read_resp(struct dsi_buf *rp);
+void dsi_set_tx_power_mode(int mode);
+
#endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
index f6f722e..e899fa3 100644
--- a/drivers/video/msm/mdss/mdp3.c
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -1173,6 +1173,11 @@
size_t size;
int rc;
+ if (pdata->panel_info.type != MIPI_VIDEO_PANEL) {
+ pr_debug("cmd mode panel, no need to copy splash image\n");
+ return 0;
+ }
+
rgb_size = MDP3_REG_READ(MDP3_REG_DMA_P_SIZE);
stride = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_Y_STRIDE);
stride = stride & 0x3FFF;
@@ -1210,8 +1215,8 @@
status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
rc = status & 0x1;
} else {
- status = MDP3_REG_READ(MDP3_REG_DMA_P_START);
- rc = status & 01;
+ status = MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG);
+ rc = status & 0x80000;
}
mdp3_clk_update(MDP3_CLK_AHB, 0);
@@ -1285,7 +1290,13 @@
return 0;
}
rc = mdp3_continuous_splash_on(pdata);
+ } else {
+ if (mdp3_is_display_on(pdata)) {
+ pr_err("lk continuous splash, but kerenl not\n");
+ rc = mdp3_continuous_splash_on(pdata);
+ }
}
+
return rc;
}
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index 568a347..573a7a7 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -370,6 +370,14 @@
struct mdp3_dma_output_config outputConfig;
struct mdp3_dma_source sourceConfig;
int frame_rate = mfd->panel_info->mipi.frame_rate;
+ int vbp, vfp, vspw;
+ int vtotal, vporch;
+
+ vbp = panel_info->lcdc.v_back_porch;
+ vfp = panel_info->lcdc.v_front_porch;
+ vspw = panel_info->lcdc.v_pulse_width;
+ vporch = vbp + vfp + vspw;
+ vtotal = vporch + panel_info->yres;
fix = &fbi->fix;
var = &fbi->var;
@@ -381,8 +389,9 @@
sourceConfig.y = 0;
sourceConfig.stride = fix->line_length;
sourceConfig.buf = (void *)mfd->iova;
+ sourceConfig.vporch = vporch;
sourceConfig.vsync_count =
- MDP_VSYNC_CLK_RATE / (frame_rate * sourceConfig.width);
+ MDP_VSYNC_CLK_RATE / (frame_rate * vtotal);
outputConfig.dither_en = 0;
outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd);
@@ -475,9 +484,6 @@
mdp3_fbmem_clear();
- if (panel->set_backlight)
- panel->set_backlight(panel, panel->panel_info.bl_max);
-
pr_debug("mdp3_ctrl_on dma start\n");
if (mfd->fbi->screen_base) {
rc = mdp3_session->dma->start(mdp3_session->dma,
@@ -520,18 +526,16 @@
mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);
- pr_debug("mdp3_ctrl_off turn panel off\n");
- if (panel->set_backlight)
- panel->set_backlight(panel, 0);
+ rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
+ if (rc)
+ pr_debug("fail to stop the MDP3 dma\n");
if (panel->event_handler)
rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
if (rc)
pr_err("fail to turn off the panel\n");
- rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
- if (rc)
- pr_err("fail to stop the MDP3 dma\n");
+
mdp3_irq_deregister();
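The vsync counter divisor now uses the full vertical total (active lines plus porches and pulse width) rather than the source width, so the counter ticks once per line of the real frame. A worked example with illustrative panel timings:

    /*
     * frame_rate = 60, yres = 800,
     * v_back_porch = 6, v_front_porch = 6, v_pulse_width = 4:
     *
     *   vporch      = 6 + 6 + 4        = 16
     *   vtotal      = 16 + 800         = 816
     *   vsync_count = MDP_VSYNC_CLK_RATE / (60 * 816)
     *
     * The old divisor, frame_rate * width, counted pixels per line rather
     * than lines per frame, mis-scaling the tick period on most panels.
     */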
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
index 22105d0..f4421f2 100644
--- a/drivers/video/msm/mdss/mdp3_dma.c
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -247,16 +247,22 @@
pr_debug("mdp3_dma_sync_config\n");
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
- sync_config = (source_config->height - 1) << 21;
+ int porch = source_config->vporch;
+ int height = source_config->height;
+ int vtotal = height + porch;
+ sync_config = vtotal << 21;
sync_config |= source_config->vsync_count;
sync_config |= BIT(19);
sync_config |= BIT(20);
MDP3_REG_WRITE(MDP3_REG_SYNC_CONFIG_0 + dma_sel, sync_config);
MDP3_REG_WRITE(MDP3_REG_VSYNC_SEL, 0x024);
- MDP3_REG_WRITE(MDP3_REG_PRIMARY_VSYNC_INIT_VAL + dma_sel, 0);
- MDP3_REG_WRITE(MDP3_REG_SYNC_THRESH_0 + dma_sel, 0x00100000);
- MDP3_REG_WRITE(MDP3_REG_PRIMARY_START_P0S + dma_sel, 0x0);
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_VSYNC_INIT_VAL + dma_sel,
+ height);
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, 0x5);
+ MDP3_REG_WRITE(MDP3_REG_SYNC_THRESH_0 + dma_sel, (4 << 16 | 2));
+ MDP3_REG_WRITE(MDP3_REG_PRIMARY_START_P0S + dma_sel, porch);
+ MDP3_REG_WRITE(MDP3_REG_TEAR_CHECK_EN, 0x1);
}
return 0;
}
@@ -291,7 +297,7 @@
* the default 16 for MDP hang issue workaround
*/
MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x20);
- MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, 0x10);
+
dma->source_config = *source_config;
dma->output_config = *output_config;
@@ -530,7 +536,7 @@
pr_debug("mdp3_dmap_update\n");
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
- cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
if (intf->active)
wait_for_completion_killable(&dma->dma_comp);
}
@@ -553,7 +559,8 @@
mdp3_dma_callback_enable(dma, cb_type);
pr_debug("mdp3_dmap_update wait for vsync_comp in\n");
- wait_for_completion_killable(&dma->vsync_comp);
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+ wait_for_completion_killable(&dma->vsync_comp);
pr_debug("mdp3_dmap_update wait for vsync_comp out\n");
return 0;
}
@@ -565,7 +572,7 @@
int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
- cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+ cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
if (intf->active)
wait_for_completion_killable(&dma->dma_comp);
}
@@ -586,7 +593,8 @@
spin_unlock_irqrestore(&dma->dma_lock, flag);
mdp3_dma_callback_enable(dma, cb_type);
- wait_for_completion_killable(&dma->vsync_comp);
+ if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+ wait_for_completion_killable(&dma->vsync_comp);
return 0;
}
@@ -631,7 +639,7 @@
return ret;
if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) {
- pr_err("mdp3_dmap_histo_get after dma shut down\n");
+ pr_debug("mdp3_dmap_histo_get after dma shut down\n");
return -EPERM;
}
@@ -694,9 +702,6 @@
unsigned long flag;
int ret;
- if (dma->histo_state == MDP3_DMA_HISTO_STATE_START)
- return -EINVAL;
-
spin_lock_irqsave(&dma->histo_lock, flag);
init_completion(&dma->histo_comp);
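For command-mode panels the sync block is now programmed for tear check. The register meanings below are inferred from MDP3 conventions rather than stated by the patch:

    /*
     * SYNC_CONFIG_0       vtotal << 21 | vsync_count, BIT(19)/BIT(20) set
     * PRIMARY_VSYNC_INIT  height: counter starts at end of active video
     * PRIMARY_RD_PTR_IRQ  0x5:    read-pointer interrupt line
     * SYNC_THRESH_0       (4 << 16) | 2: start/continue thresholds
     * PRIMARY_START_P0S   porch:  kickoff position within the blanking
     * TEAR_CHECK_EN       1:      enable the tear-check logic
     */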
diff --git a/drivers/video/msm/mdss/mdp3_dma.h b/drivers/video/msm/mdss/mdp3_dma.h
index 7dd6ba7..6983e55 100644
--- a/drivers/video/msm/mdss/mdp3_dma.h
+++ b/drivers/video/msm/mdss/mdp3_dma.h
@@ -151,6 +151,7 @@
void *buf;
int stride;
int vsync_count;
+ int vporch;
};
struct mdp3_dma_output_config {
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index aba77e3..717241d 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -333,6 +333,7 @@
u8 mixer_stage;
u8 is_fg;
u8 alpha;
+ u8 blend_op;
u8 overfetch_disable;
u32 transp;
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index d1595b3..edd4c19 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -280,11 +280,6 @@
pipe = mixer->stage_pipe[i];
if (pipe == NULL)
continue;
- if (pipe->is_fg) {
- ab_total = 0;
- ib_total = 0;
- max_clk_rate = 0;
- }
if (mdss_mdp_perf_calc_pipe(pipe, &perf))
continue;
@@ -1207,7 +1202,8 @@
{
struct mdss_mdp_pipe *pipe;
u32 off, blend_op, blend_stage;
- u32 mixercfg = 0, blend_color_out = 0, bgalpha = 0;
+ u32 mixercfg = 0, blend_color_out = 0, bg_alpha_enable = 0;
+ u32 fg_alpha = 0, bg_alpha = 0;
int stage, secure = 0;
if (!mixer)
@@ -1227,7 +1223,7 @@
mixercfg = 1 << (3 * pipe->num);
}
if (pipe->src_fmt->alpha_enable)
- bgalpha = 1;
+ bg_alpha_enable = 1;
secure = pipe->flags & MDP_SECURE_OVERLAY_SESSION;
}
@@ -1244,48 +1240,79 @@
blend_stage = stage - MDSS_MDP_STAGE_0;
off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);
- if (pipe->is_fg) {
- bgalpha = 0;
- if (!secure)
- mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+ blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+ MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
+ fg_alpha = pipe->alpha;
+ bg_alpha = 0xFF - pipe->alpha;
+ /* keep fg alpha */
+ blend_color_out |= 1 << (blend_stage + 1);
+
+ switch (pipe->blend_op) {
+ case BLEND_OP_OPAQUE:
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
- /* keep fg alpha */
- blend_color_out |= 1 << (blend_stage + 1);
- pr_debug("pnum=%d stg=%d alpha=IS_FG\n", pipe->num,
+ pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num,
stage);
- } else if (pipe->src_fmt->alpha_enable) {
- bgalpha = 0;
- blend_op = (MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL |
- MDSS_MDP_BLEND_BG_INV_ALPHA);
- /* keep fg alpha */
- blend_color_out |= 1 << (blend_stage + 1);
+ break;
- pr_debug("pnum=%d stg=%d alpha=FG PIXEL\n", pipe->num,
+ case BLEND_OP_PREMULTIPLIED:
+ if (pipe->src_fmt->alpha_enable) {
+ blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
+ MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDSS_MDP_BLEND_BG_MOD_ALPHA |
+ MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
+ }
+ }
+ pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num,
stage);
- } else if (bgalpha) {
- blend_op = (MDSS_MDP_BLEND_BG_ALPHA_BG_PIXEL |
- MDSS_MDP_BLEND_FG_ALPHA_BG_PIXEL |
- MDSS_MDP_BLEND_FG_INV_ALPHA);
- /* keep bg alpha */
- pr_debug("pnum=%d stg=%d alpha=BG_PIXEL\n", pipe->num,
+ break;
+
+ case BLEND_OP_COVERAGE:
+ if (pipe->src_fmt->alpha_enable) {
+ blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL |
+ MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDSS_MDP_BLEND_FG_MOD_ALPHA |
+ MDSS_MDP_BLEND_FG_INV_MOD_ALPHA |
+ MDSS_MDP_BLEND_BG_MOD_ALPHA |
+ MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
+ }
+ }
+ pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num,
stage);
- } else {
+ break;
+
+ default:
blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
- pr_debug("pnum=%d stg=%d alpha=CONST\n", pipe->num,
+ pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num,
stage);
+ break;
}
+ if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
+ blend_color_out = 0;
+
mixercfg |= stage << (3 * pipe->num);
+ pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
+ blend_op, fg_alpha, bg_alpha);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA,
- pipe->alpha);
+ fg_alpha);
mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA,
- 0xFF - pipe->alpha);
+ bg_alpha);
}
if (mixer->cursor_enabled)
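Written out as blend equations (Cf/Cb are the fg/bg colors, aP the per-pixel alpha, af the constant plane alpha from pipe->alpha), the three operations programmed above reduce to, in sketch form:

    /*
     * OPAQUE        : out = af*Cf + (0xff - af)*Cb   (const fg / const bg)
     * PREMULTIPLIED : out = Cf + (1 - aP)*Cb         (fg already scaled by aP);
     *                 if af < 0xff the bg term is further modulated by af
     * COVERAGE      : out = aP*Cf + (1 - aP)*Cb      (classic source-over);
     *                 if af < 0xff both terms are further modulated by af
     */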
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index c4dee86..6252e17 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -491,6 +491,16 @@
pipe->is_fg = req->is_fg;
pipe->alpha = req->alpha;
pipe->transp = req->transp_mask;
+ pipe->blend_op = req->blend_op;
+ if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
+ pipe->blend_op = fmt->alpha_enable ?
+ BLEND_OP_PREMULTIPLIED :
+ BLEND_OP_OPAQUE;
+
+ if (!fmt->alpha_enable && (pipe->blend_op != BLEND_OP_OPAQUE))
+ pr_warn("Unintended blend_op %d on layer with no alpha plane\n",
+ pipe->blend_op);
+
pipe->overfetch_disable = fmt->is_yuv &&
!(pipe->flags & MDP_SOURCE_ROTATED_90);
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index f74fcbe..87047d2 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -20,10 +20,7 @@
#define KGSL_CONTEXT_TRASH_STATE 0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS 0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS 0x00000080
-#define KGSL_CONTEXT_END_OF_FRAME 0x00000100
-
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
-#define KGSL_CONTEXT_SYNC 0x00000400
/* bits [12:15] are reserved for future use */
#define KGSL_CONTEXT_TYPE_MASK 0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT 20
@@ -286,7 +283,7 @@
#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
-/* DEPRECATED: issue indirect commands to the GPU.
+/* Issue indirect commands to the GPU.
* drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
* ibaddr and sizedwords must specify a subset of a buffer created
* with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
@@ -294,9 +291,6 @@
* timestamp is a returned counter value which can be passed to
* other ioctls to determine when the commands have been executed by
* the GPU.
- *
- * This fucntion is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
- * instead
*/
struct kgsl_ringbuffer_issueibcmds {
unsigned int drawctxt_id;
@@ -811,77 +805,6 @@
#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)
-/*
- * struct kgsl_cmd_syncpoint_timestamp
- * @context_id: ID of a KGSL context
- * @timestamp: GPU timestamp
- *
- * This structure defines a syncpoint comprising a context/timestamp pair. A
- * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
- * dependencies that must be met before the command can be submitted to the
- * hardware
- */
-struct kgsl_cmd_syncpoint_timestamp {
- unsigned int context_id;
- unsigned int timestamp;
-};
-
-#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
-
-struct kgsl_cmd_syncpoint_fence {
- int fd;
-};
-
-#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
-
-/**
- * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
- * @type: type of sync point defined here
- * @priv: Pointer to the type specific buffer
- * @size: Size of the type specific buffer
- *
- * This structure contains pointers defining a specific command sync point.
- * The pointer and size should point to a type appropriate structure.
- */
-struct kgsl_cmd_syncpoint {
- int type;
- void __user *priv;
- unsigned int size;
-};
-
-/**
- * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
- * @context_id: KGSL context ID that owns the commands
- * @flags:
- * @cmdlist: User pointer to a list of kgsl_ibdesc structures
- * @numcmds: Number of commands listed in cmdlist
- * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
- * @numsyncs: Number of sync points listed in synclist
- * @timestamp: On entry the a user defined timestamp, on exist the timestamp
- * assigned to the command batch
- *
- * This structure specifies a command to send to the GPU hardware. This is
- * similar to kgsl_issueibcmds expect that it doesn't support the legacy way to
- * submit IB lists and it adds sync points to block the IB until the
- * dependencies are satisified. This entry point is the new and preferred way
- * to submit commands to the GPU.
- */
-
-struct kgsl_submit_commands {
- unsigned int context_id;
- unsigned int flags;
- struct kgsl_ibdesc __user *cmdlist;
- unsigned int numcmds;
- struct kgsl_cmd_syncpoint __user *synclist;
- unsigned int numsyncs;
- unsigned int timestamp;
-/* private: reserved for future use */
- unsigned int __pad[4];
-};
-
-#define IOCTL_KGSL_SUBMIT_COMMANDS \
- _IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
-
#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
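With IOCTL_KGSL_SUBMIT_COMMANDS and its syncpoint structures reverted, userspace submits through IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS again. A hedged user-space sketch; only members visible in this hunk are spelled out, and the IB address/size fields follow the struct's full definition in this header:

    struct kgsl_ringbuffer_issueibcmds cmd = {
            .drawctxt_id = ctxt_id,  /* from IOCTL_KGSL_DRAWCTXT_CREATE */
            /* ... IB address/size members per the header ... */
    };

    if (ioctl(kgsl_fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &cmd) == 0)
            last_ts = cmd.timestamp; /* returned; usable with the waittimestamp ioctls */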
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 2455212..fab9301 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -405,6 +405,33 @@
struct mdp_hist_lut_data hist_lut_cfg;
};
+/**
+ * enum mdss_mdp_blend_op - Different blend operations set by userspace
+ *
+ * @BLEND_OP_NOT_DEFINED: No blend operation defined for the layer.
+ * @BLEND_OP_OPAQUE: Apply a constant blend operation. The layer
+ * appears opaque when the fg plane alpha is
+ * 0xff.
+ * @BLEND_OP_PREMULTIPLIED: Apply the source-over blend rule. The layer
+ * is already alpha pre-multiplied. If the fg
+ * plane alpha is less than 0xff, apply
+ * modulation as well. This operation is
+ * intended for layers that have an alpha
+ * channel.
+ * @BLEND_OP_COVERAGE: Apply the source-over blend rule. The layer
+ * is not alpha pre-multiplied, so
+ * pre-multiplication is applied. If the fg
+ * plane alpha is less than 0xff, apply
+ * modulation as well.
+ * @BLEND_OP_MAX: Marks the upper bound of blend operations
+ * supported by the MDP.
+ */
+enum mdss_mdp_blend_op {
+ BLEND_OP_NOT_DEFINED = 0,
+ BLEND_OP_OPAQUE,
+ BLEND_OP_PREMULTIPLIED,
+ BLEND_OP_COVERAGE,
+ BLEND_OP_MAX,
+};
+
struct mdp_overlay {
struct msmfb_img src;
struct mdp_rect src_rect;
@@ -412,6 +439,7 @@
uint32_t z_order; /* stage number */
uint32_t is_fg; /* control alpha & transp */
uint32_t alpha;
+ uint32_t blend_op;
uint32_t transp_mask;
uint32_t flags;
uint32_t id;
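A userspace composer opts into the new field per overlay. A minimal sketch for a premultiplied ARGB layer; all other mdp_overlay members are elided:

    struct mdp_overlay ov;

    memset(&ov, 0, sizeof(ov));
    ov.alpha    = 0xff;                     /* constant plane alpha */
    ov.blend_op = BLEND_OP_PREMULTIPLIED;   /* source-over, fg pre-scaled */
    /* BLEND_OP_NOT_DEFINED (0) falls back to the alpha_enable-based default */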
diff --git a/include/linux/smsc3503.h b/include/linux/smsc_hub.h
similarity index 90%
rename from include/linux/smsc3503.h
rename to include/linux/smsc_hub.h
index 1e28a58..9c0afc0 100644
--- a/include/linux/smsc3503.h
+++ b/include/linux/smsc_hub.h
@@ -14,6 +14,12 @@
#ifndef __LINUX_SMSC3503_H__
#define __LINUX_SMSC3503_H__
+#define SMSC3503_ID 3503
+#define SMSC4604_ID 4604
+#define SMSC3503_I2C_ADDR 0x08
+#define SMSC4604_I2C_ADDR 0x2d
+#define SMSC_GSBI_I2C_BUS_ID 0
+
/*Serial interface Registers*/
#define SMSC3503_VENDORID 0x00 /*u16 read*/
#define SMSC3503_PRODUCTID 0x02 /*u16 read*/
@@ -42,6 +48,7 @@
#define OCSPINSEL (1<<5)
struct smsc_hub_platform_data {
+ u32 model_id;
int hub_reset;
int refclk_gpio;
int int_gpio;
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 170dbe7..66b0094 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -126,6 +126,16 @@
ON_DEMAND_SUPPLIES_MAX,
};
+/*
+ * These delays come from the codec hardware specification.
+ * Add future delays to this list instead of using
+ * magic numbers in the code.
+ */
+enum {
+ CODEC_DELAY_1_MS = 1000,
+ CODEC_DELAY_1_1_MS = 1100,
+};
+
struct hpf_work {
struct msm8x10_wcd_priv *msm8x10_wcd;
u32 decimator;
@@ -1148,13 +1158,8 @@
"ZERO", "ADC1", "ADC2", "DMIC1", "DMIC2"
};
-static const char * const anc_mux_text[] = {
- "ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "ADC_MB",
- "RSVD_1", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6"
-};
-
-static const char * const anc1_fb_mux_text[] = {
- "ZERO", "EAR_HPH_L", "EAR_LINE_1",
+static const char * const adc2_mux_text[] = {
+ "ZERO", "INP2", "INP3"
};
static const char * const iir1_inp1_text[] = {
@@ -1212,6 +1217,9 @@
SOC_ENUM_SINGLE(MSM8X10_WCD_A_CDC_CONN_LO_DAC_CTL, 0, 3,
rx_rdac4_text);
+static const struct soc_enum adc2_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
static const struct snd_kcontrol_new rx_mix1_inp1_mux =
SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
@@ -1242,6 +1250,9 @@
static const struct snd_kcontrol_new rx_dac4_mux =
SOC_DAPM_ENUM("RDAC4 MUX Mux", rx_rdac4_enum);
+static const struct snd_kcontrol_new tx_adc2_mux =
+ SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
+
static int msm8x10_wcd_put_dec_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1370,7 +1381,8 @@
if (w->reg == MSM8X10_WCD_A_TX_1_EN)
init_bit_shift = 7;
- else if (w->reg == MSM8X10_WCD_A_TX_2_EN)
+ else if ((w->reg == MSM8X10_WCD_A_TX_2_EN) ||
+ (w->reg == MSM8X10_WCD_A_TX_3_EN))
init_bit_shift = 6;
else {
dev_err(codec->dev, "%s: Error, invalid adc register\n",
@@ -1383,9 +1395,11 @@
msm8x10_wcd_codec_enable_adc_block(codec, 1);
snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
1 << init_bit_shift);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
break;
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
break;
case SND_SOC_DAPM_POST_PMD:
msm8x10_wcd_codec_enable_adc_block(codec, 0);
@@ -1930,9 +1944,14 @@
{"DEC2 MUX", "ADC2", "ADC2"},
{"DEC2 MUX", NULL, "CDC_CONN"},
+ {"ADC2", NULL, "ADC2 MUX"},
+ {"ADC2 MUX", "INP2", "ADC2_INP2"},
+ {"ADC2 MUX", "INP3", "ADC2_INP3"},
+
/* ADC Connections */
{"ADC1", NULL, "AMIC1"},
- {"ADC2", NULL, "AMIC2"},
+ {"ADC2_INP2", NULL, "AMIC2"},
+ {"ADC2_INP3", NULL, "AMIC3"},
{"IIR1", NULL, "IIR1 INP1 MUX"},
{"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
@@ -2406,9 +2425,17 @@
SND_SOC_DAPM_ADC_E("ADC1", NULL, MSM8X10_WCD_A_TX_1_EN, 7, 0,
msm8x10_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_ADC_E("ADC2", NULL, MSM8X10_WCD_A_TX_2_EN, 7, 0,
+ SND_SOC_DAPM_ADC_E("ADC2_INP2", NULL, MSM8X10_WCD_A_TX_2_EN, 7, 0,
msm8x10_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_ADC_E("ADC2_INP3", NULL, MSM8X10_WCD_A_TX_3_EN, 7, 0,
+ msm8x10_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0,
+ &tx_adc2_mux),
SND_SOC_DAPM_MICBIAS("MIC BIAS External", MSM8X10_WCD_A_MICB_1_CTL,
7, 0),
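Net effect on the capture path: ADC2 is no longer wired straight to AMIC2 but muxed between two inputs. As a routing sketch, with widget names from the routes above:

    /*
     * AMIC2 --> ADC2_INP2 --\
     *                        >-- "ADC2 MUX" --> ADC2 --> DEC2 MUX
     * AMIC3 --> ADC2_INP3 --/
     */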