Merge "diag: Fix for CRC check for Control byte"
diff --git a/arch/arm/boot/dts/apq8074-dragonboard.dtsi b/arch/arm/boot/dts/apq8074-dragonboard.dtsi
index 8afd986..824b0ab 100644
--- a/arch/arm/boot/dts/apq8074-dragonboard.dtsi
+++ b/arch/arm/boot/dts/apq8074-dragonboard.dtsi
@@ -504,13 +504,6 @@
 	};
 
 	gpio@e300 { /* GPIO 36 */
-		qcom,mode = <1>;  /* QPNP_PIN_MODE_DIG_OUT */
-		qcom,output-type = <0>; /* QPNP_PIN_OUT_BUF_CMOS */
-		qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
-		qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
-		qcom,out-strength = <3>; /* QPNP_PIN_OUT_STRENGTH_HIGH */
-		qcom,src-sel = <3>; /* QPNP_PIN_SEL_FUNC_2 */
-		qcom,master-en = <1>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/msm8974-liquid.dtsi b/arch/arm/boot/dts/msm8974-liquid.dtsi
index 16672fd..ba085a0 100644
--- a/arch/arm/boot/dts/msm8974-liquid.dtsi
+++ b/arch/arm/boot/dts/msm8974-liquid.dtsi
@@ -216,7 +216,7 @@
 					/* Object 6, Instance = 0 */
 					00 00 00 00 00 00
 					/* Object 38, Instance = 0 */
-					19 03 00 1E 05 0D 00 00 00 00
+					19 04 00 07 08 0D 00 00 00 00
 					00 00 00 00 00 00 00 00 00 00
 					00 00 00 00 00 00 00 00 00 00
 					00 00 00 00 00 00 00 00 00 00
@@ -242,7 +242,7 @@
 					00 00 00 00 00 00 00 00 00
 					/* Object 25, Instance = 0 */
 					00 00 54 6F F0 55 00 00 00 00
-					00 00 00 00 00
+					00 00 00 00 0C
 					/* Object 27, Instance = 0 */
 					00 00 00 00 00 00 00
 					/* Object 40, Instance = 0 */
diff --git a/arch/arm/configs/msm8226-perf_defconfig b/arch/arm/configs/msm8226-perf_defconfig
index 03ed61f..2f38ebf 100644
--- a/arch/arm/configs/msm8226-perf_defconfig
+++ b/arch/arm/configs/msm8226-perf_defconfig
@@ -242,6 +242,9 @@
 CONFIG_PPPOE=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_MPPE=y
 CONFIG_USB_USBNET=y
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
diff --git a/arch/arm/configs/msm8226_defconfig b/arch/arm/configs/msm8226_defconfig
index 8d116f1..8ca54fd 100644
--- a/arch/arm/configs/msm8226_defconfig
+++ b/arch/arm/configs/msm8226_defconfig
@@ -244,6 +244,9 @@
 CONFIG_PPPOE=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_MPPE=y
 CONFIG_USB_USBNET=y
 CONFIG_WCNSS_CORE=y
 CONFIG_WCNSS_CORE_PRONTO=y
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index 20c6362..7ea1bd5 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -350,6 +350,7 @@
 CONFIG_MMC_MSM_SPS_SUPPORT=y
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_SWITCH=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_DRV_MSM is not set
@@ -407,6 +408,9 @@
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPPOE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_MPPE=y
 CONFIG_N_HDLC=y
 CONFIG_UNIX98_PTYS=y
 CONFIG_INPUT_KXTJ9=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index 0a7faae..8e6f5f9 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -373,6 +373,7 @@
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_MSM_GPIO_FLASH=y
 CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_SWITCH=y
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_DRV_MSM is not set
@@ -461,6 +462,9 @@
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPPOE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_MPPE=y
 CONFIG_N_HDLC=y
 CONFIG_UNIX98_PTYS=y
 CONFIG_INPUT_KXTJ9=y
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 4a9373a..6218a2b 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -120,6 +120,7 @@
 	.ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
 	.fast_hang_detect = 1,
 	.long_ib_detect = 1,
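+	/*
+	 * Enable all interrupts by default; gpudev->irq_init(), where
+	 * implemented, narrows this to the core-specific mask.
+	 */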
+	.intr_mask = 0xFFFFFFFF,
 };
 
 /* This set of registers are used for Hang detection
@@ -287,7 +288,7 @@
  */
 
 int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
-	struct kgsl_perfcounter_read_group *reads, unsigned int count)
+	struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
 {
 	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
 	struct adreno_perfcount_group *group;
@@ -307,12 +308,6 @@
 	if (reads == NULL || count == 0 || count > 100)
 		return -EINVAL;
 
-	/* verify valid inputs group ids and countables */
-	for (i = 0; i < count; i++) {
-		if (reads[i].groupid >= counters->group_count)
-			return -EINVAL;
-	}
-
 	list = kmalloc(sizeof(struct kgsl_perfcounter_read_group) * count,
 			GFP_KERNEL);
 	if (!list)
@@ -326,8 +321,15 @@
 
 	/* list iterator */
 	for (j = 0; j < count; j++) {
+
 		list[j].value = 0;
 
+		/* Verify that the group ID is within range */
+		if (list[j].groupid >= counters->group_count) {
+			ret = -EINVAL;
+			goto done;
+		}
+
 		group = &(counters->groups[list[j].groupid]);
 
 		/* group/counter iterator */
@@ -335,8 +337,7 @@
 			if (group->regs[i].countable == list[j].countable) {
 				list[j].value =
 					adreno_dev->gpudev->perfcounter_read(
-					adreno_dev, list[j].groupid,
-					i, group->regs[i].offset);
+					adreno_dev, list[j].groupid, i);
 				break;
 			}
 		}
@@ -599,6 +600,33 @@
 	return -EINVAL;
 }
 
+/**
+ * adreno_perfcounter_restore() - Restore performance counters
+ * @adreno_dev: adreno device to configure
+ *
+ * Load the physical performance counters with the 64 bit values that
+ * were saved on GPU power collapse.
+ */
+static inline void adreno_perfcounter_restore(struct adreno_device *adreno_dev)
+{
+	if (adreno_dev->gpudev->perfcounter_restore)
+		adreno_dev->gpudev->perfcounter_restore(adreno_dev);
+}
+
+/**
+ * adreno_perfcounter_save() - Save performance counters
+ * @adreno_dev: adreno device to configure
+ *
+ * Save the performance counter values before GPU power collapse.
+ * The saved values are restored on restart.
+ * This ensures physical counters are coherent across power-collapse.
+ */
+static inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
+{
+	if (adreno_dev->gpudev->perfcounter_save)
+		adreno_dev->gpudev->perfcounter_save(adreno_dev);
+}
+
 static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 {
 	irqreturn_t result;
@@ -1702,6 +1730,9 @@
 
 	adreno_perfcounter_init(device);
 
+	if (adreno_dev->gpudev->irq_init)
+		adreno_dev->gpudev->irq_init(adreno_dev);
+
 	/* Power down the device */
 	kgsl_pwrctrl_disable(device);
 
@@ -1766,11 +1797,15 @@
 		adreno_dev->gpudev->soft_reset(adreno_dev);
 	}
 
+	/* Restore performance counter registers with saved values */
+	adreno_perfcounter_restore(adreno_dev);
+
 	/* Start the GPU */
 	adreno_dev->gpudev->start(adreno_dev);
 
+	kgsl_atomic_set(&adreno_dev->hang_intr_set, 0);
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
-	device->ftbl->irqctrl(device, 1);
+	device->ftbl->irqctrl(device, adreno_dev->intr_mask);
 
 	status = adreno_ringbuffer_start(&adreno_dev->ringbuffer);
 	if (status)
@@ -1821,6 +1856,9 @@
 
 	adreno_ocmem_gmem_free(adreno_dev);
 
+	/* Save physical performance counter values before GPU power down */
+	adreno_perfcounter_save(adreno_dev);
+
 	/* Power down the device */
 	kgsl_pwrctrl_disable(device);
 
@@ -2224,15 +2262,22 @@
 	/* Make sure we are totally awake */
 	kgsl_pwrctrl_enable(device);
 
+	/* save physical performance counter values before GPU soft reset */
+	adreno_perfcounter_save(adreno_dev);
+
 	/* Reset the GPU */
 	adreno_dev->gpudev->soft_reset(adreno_dev);
 
+	/* Restore physical performance counter values after soft reset */
+	adreno_perfcounter_restore(adreno_dev);
+
 	/* Reinitialize the GPU */
 	adreno_dev->gpudev->start(adreno_dev);
 
 	/* Enable IRQ */
+	kgsl_atomic_set(&adreno_dev->hang_intr_set, 0);
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
-	device->ftbl->irqctrl(device, 1);
+	device->ftbl->irqctrl(device, adreno_dev->intr_mask);
 
 	/*
 	 * Restart the ringbuffer - we can go down the warm start path because
@@ -2971,6 +3016,52 @@
 				(adreno_dev->long_ib_detect ? 1 : 0));
 }
 
+/**
+ * _ft_hang_intr_status_store() -  Routine to configure GPU
+ * hang interrupt
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ */
+static int _ft_hang_intr_status_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+	int ret = 0;
+
+	if (adreno_dev == NULL)
+		return 0;
+
+	mutex_lock(&adreno_dev->dev.mutex);
+	if (adreno_hang_intr_supported(adreno_dev))
+		ret = _ft_sysfs_store(buf, count, &adreno_dev->hang_intr_en);
+	mutex_unlock(&adreno_dev->dev.mutex);
+
+	return ret;
+}
+
+/**
+ * _ft_hang_intr_status_show() -  Routine to show GPU hang
+ * interrupt enable status
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ */
+static int _ft_hang_intr_status_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+	if (adreno_dev == NULL)
+		return 0;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", (adreno_dev->hang_intr_en &&
+			(adreno_hang_intr_supported(adreno_dev))) ? 1 : 0);
+}
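+
+/*
+ * The attribute is exposed alongside the other fault-tolerance nodes on
+ * the kgsl device (typically /sys/class/kgsl/kgsl-3d0/), e.g.:
+ *   echo 1 > ft_hang_intr_status    - enable the hang interrupt (a330v2 only)
+ *   cat ft_hang_intr_status         - read back the enable state
+ */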
 
 #define FT_DEVICE_ATTR(name) \
 	DEVICE_ATTR(name, 0644,	_ ## name ## _show, _ ## name ## _store);
@@ -2979,6 +3070,7 @@
 FT_DEVICE_ATTR(ft_pagefault_policy);
 FT_DEVICE_ATTR(ft_fast_hang_detect);
 FT_DEVICE_ATTR(ft_long_ib_detect);
+FT_DEVICE_ATTR(ft_hang_intr_status);
 
 
 const struct device_attribute *ft_attr_list[] = {
@@ -2986,6 +3078,7 @@
 	&dev_attr_ft_pagefault_policy,
 	&dev_attr_ft_fast_hang_detect,
 	&dev_attr_ft_long_ib_detect,
+	&dev_attr_ft_hang_intr_status,
 	NULL,
 };
 
@@ -3149,6 +3242,10 @@
 	unsigned int rptr;
 
 	do {
+
+		if (kgsl_atomic_read(&adreno_dev->hang_intr_set))
+			return -ETIMEDOUT;
+
 		/*
 		 * Wait is "jiffies" first time in the loop to start
 		 * GPU stall detection immediately.
@@ -3199,6 +3296,9 @@
 			return 0;
 
 		/* Dont wait for timeout, detect hang faster.  */
+		if (kgsl_atomic_read(&adreno_dev->hang_intr_set))
+			goto err;
+
 		if (time_after(jiffies, wait_time_part)) {
 			wait_time_part = jiffies +
 				msecs_to_jiffies(KGSL_TIMEOUT_PART);
@@ -3678,16 +3778,8 @@
 				fast_hang_detected = 0;
 		}
 
-		if (fast_hang_detected) {
-			KGSL_FT_ERR(device,
-				"Proc %s, ctxt_id %d ts %d triggered fault tolerance"
-				" on global ts %d\n",
-				pid_name, context ? context->id : 0,
-				(kgsl_readtimestamp(device, context,
-				KGSL_TIMESTAMP_RETIRED) + 1),
-				curr_global_ts + 1);
+		if (fast_hang_detected)
 			return 1;
-		}
 
 		if (curr_context != NULL) {
 
@@ -4063,10 +4155,10 @@
 	}
 }
 
-void adreno_irqctrl(struct kgsl_device *device, int state)
+void adreno_irqctrl(struct kgsl_device *device, unsigned int mask)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	adreno_dev->gpudev->irq_control(adreno_dev, state);
+	adreno_dev->gpudev->irq_control(adreno_dev, mask);
 }
 
 static unsigned int adreno_gpuid(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index a837574..26a6720 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -127,6 +127,9 @@
 	unsigned int pix_shader_start;
 	unsigned int instruction_size;
 	unsigned int ib_check_level;
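+	/*
+	 * Hang interrupt state: hang_intr_set is raised from the fatal
+	 * error interrupt path and consumed by the fault-tolerance worker,
+	 * hang_intr_en is the sysfs-controlled enable, and intr_mask is
+	 * the interrupt mask programmed through irqctrl().
+	 */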
+	atomic_t hang_intr_set;
+	unsigned int hang_intr_en;
+	unsigned int intr_mask;
 	unsigned int fast_hang_detect;
 	unsigned int ft_policy;
 	unsigned int long_ib_detect;
@@ -164,12 +167,18 @@
 * @kernelcount: number of kernel users of the register
 * @usercount: number of user space users of the register
  * @offset: register hardware offset
+ * @load_bit: The bit number in LOAD register which corresponds to this counter
+ * @select: The offset of the select register used to program the countable
+ * @value: The saved 64 bit counter value
  */
 struct adreno_perfcount_register {
 	unsigned int countable;
 	unsigned int kernelcount;
 	unsigned int usercount;
 	unsigned int offset;
+	int load_bit;
+	unsigned int select;
+	uint64_t value;
 };
 
 /**
@@ -194,6 +203,9 @@
 	unsigned int group_count;
 };
 
+#define ADRENO_PERFCOUNTER_GROUP(core, name) { core##_perfcounters_##name, \
+	ARRAY_SIZE(core##_perfcounters_##name), __stringify(name) }
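+/*
+ * For example, ADRENO_PERFCOUNTER_GROUP(a3xx, cp) expands to
+ * { a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp), "cp" }
+ */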
+
 /**
  * adreno_regs: List of registers that are used in kgsl driver for all
  * 3D devices. Each device type has different offset value for the same
@@ -286,18 +298,22 @@
 	void (*ctxt_draw_workaround)(struct adreno_device *,
 					struct adreno_context *);
 	irqreturn_t (*irq_handler)(struct adreno_device *);
-	void (*irq_control)(struct adreno_device *, int);
+	void (*irq_control)(struct adreno_device *, unsigned int);
 	unsigned int (*irq_pending)(struct adreno_device *);
+	void (*irq_init)(struct adreno_device *);
 	void * (*snapshot)(struct adreno_device *, void *, int *, int);
 	int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
 	void (*perfcounter_init)(struct adreno_device *);
+	void (*perfcounter_save)(struct adreno_device *);
+	void (*perfcounter_restore)(struct adreno_device *);
 	void (*start)(struct adreno_device *);
 	unsigned int (*busy_cycles)(struct adreno_device *);
 	void (*perfcounter_enable)(struct adreno_device *, unsigned int group,
 		unsigned int counter, unsigned int countable);
 	uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
-		unsigned int group, unsigned int counter,
-		unsigned int offset);
+		unsigned int group, unsigned int counter);
+	void (*perfcounter_write)(struct adreno_device *adreno_dev,
+		unsigned int group, unsigned int counter);
 	int (*coresight_enable) (struct kgsl_device *device);
 	void (*coresight_disable) (struct kgsl_device *device);
 	void (*coresight_config_debug_reg) (struct kgsl_device *device,
@@ -755,4 +771,29 @@
 static inline void adreno_debugfs_init(struct kgsl_device *device) { }
 #endif
 
+/*
+ * adreno_hang_intr_supported() - Return true if hang interrupt is supported
+ * @adreno_dev:		Pointer to the adreno device
+ */
+static inline bool adreno_hang_intr_supported(struct adreno_device *adreno_dev)
+{
+	bool ret = 0;
+	if (adreno_is_a330v2(adreno_dev))
+		ret = 1;
+	return ret;
+}
+
+/*
+ * adreno_fatal_err_work() - Schedule fault-tolerance work on a fatal error
+ * @adreno_dev:		Pointer to the adreno device
+ */
+static inline void adreno_fatal_err_work(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+
+	/* If hang_intr_set is 0, set it to 1 and queue work */
+	if (!atomic_cmpxchg(&adreno_dev->hang_intr_set, 0, 1))
+		/* Schedule work to do fault tolerance */
+		queue_work(device->work_queue, &device->hang_intr_ws);
+}
 #endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 3d72c5c..63e3871 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1833,11 +1833,12 @@
 	return result;
 }
 
-static void a2xx_irq_control(struct adreno_device *adreno_dev, int state)
+static void a2xx_irq_control(struct adreno_device *adreno_dev,
+							 unsigned int mask)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 
-	if (state) {
+	if (mask) {
 		kgsl_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
 		kgsl_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
 		kgsl_regwrite(device, MH_INTERRUPT_MASK,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index f9110ea..5778e9e 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -13,6 +13,7 @@
 
 #include <linux/delay.h>
 #include <linux/sched.h>
+#include <linux/ratelimit.h>
 #include <mach/socinfo.h>
 
 #include "kgsl.h"
@@ -3022,6 +3023,8 @@
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	const char *err = "";
+	/* Limit to 10 messages every 5 seconds */
+	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 
 	switch (bit) {
 	case A3XX_INT_RBBM_AHB_ERROR: {
@@ -3033,8 +3036,8 @@
 		 * Return the word address of the erroring register so that it
 		 * matches the register specification
 		 */
-
-		KGSL_DRV_CRIT(device,
+		if (__ratelimit(&ratelimit_state))
+			KGSL_DRV_CRIT(device,
 			"RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
 			reg & (1 << 28) ? "WRITE" : "READ",
 			(reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
@@ -3059,34 +3062,17 @@
 	case A3XX_INT_VFD_ERROR:
 		err = "VFD: Out of bounds access";
 		break;
-	case A3XX_INT_CP_T0_PACKET_IN_IB:
-		err = "ringbuffer TO packet in IB interrupt";
-		break;
-	case A3XX_INT_CP_OPCODE_ERROR:
-		err = "ringbuffer opcode error interrupt";
-		break;
-	case A3XX_INT_CP_RESERVED_BIT_ERROR:
-		err = "ringbuffer reserved bit error interrupt";
-		break;
-	case A3XX_INT_CP_HW_FAULT:
-		err = "ringbuffer hardware fault";
-		break;
-	case A3XX_INT_CP_REG_PROTECT_FAULT:
-		err = "ringbuffer protected mode error interrupt";
-		break;
-	case A3XX_INT_CP_AHB_ERROR_HALT:
-		err = "ringbuffer AHB error interrupt";
-		break;
-	case A3XX_INT_MISC_HANG_DETECT:
-		err = "MISC: GPU hang detected";
-		break;
 	case A3XX_INT_UCHE_OOB_ACCESS:
 		err = "UCHE:  Out of bounds access";
 		break;
 	}
 
-	KGSL_DRV_CRIT(device, "%s\n", err);
-	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	/*
+	 * Limit error interrupt reporting to prevent kernel log
+	 * flooding from causing a watchdog timeout
+	 */
+	if (__ratelimit(&ratelimit_state))
+		KGSL_DRV_CRIT(device, "%s\n", err);
 }
 
 static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
@@ -3101,116 +3087,47 @@
 }
 
 /**
- * struct a3xx_perfcounter_register - Define a performance counter register
- * @load_bit: the bit to set in RBBM_LOAD_CMD0/RBBM_LOAD_CMD1 to force the RBBM
- * to load the reset value into the appropriate counter
- * @select: The dword offset of the register to write the selected
- * countable into
+ * a3xx_fatal_err_callback() - Routine for GPU fatal interrupts
+ * @adreno_dev: adreno device ptr
+ * @bit: interrupt bit
  */
+static void a3xx_fatal_err_callback(struct adreno_device *adreno_dev, int bit)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	const char *err = "";
 
-struct a3xx_perfcounter_register {
-	unsigned int load_bit;
-	unsigned int select;
-};
+	switch (bit) {
+	case A3XX_INT_CP_OPCODE_ERROR:
+		err = "ringbuffer opcode error interrupt";
+		break;
+	case A3XX_INT_CP_RESERVED_BIT_ERROR:
+		err = "ringbuffer reserved bit error interrupt";
+		break;
+	case A3XX_INT_CP_T0_PACKET_IN_IB:
+		err = "ringbuffer TO packet in IB interrupt";
+		break;
+	case A3XX_INT_CP_HW_FAULT:
+		err = "ringbuffer hardware fault";
+		break;
+	case A3XX_INT_CP_REG_PROTECT_FAULT:
+		err = "ringbuffer protected mode error interrupt";
+		break;
+	case A3XX_INT_CP_AHB_ERROR_HALT:
+		err = "ringbuffer AHB error interrupt";
+		break;
+	case A3XX_INT_MISC_HANG_DETECT:
+		if (!adreno_dev->hang_intr_en)
+			return;
+		err = "stall interrupt";
+		break;
+	}
 
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_cp[] = {
-	{ 0, A3XX_CP_PERFCOUNTER_SELECT },
-};
+	KGSL_DRV_CRIT(device, "%s\n", err);
+	if ((!device->mmu.fault) &&
+		(adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))
+		adreno_fatal_err_work(adreno_dev);
+}
 
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rbbm[] = {
-	{ 1, A3XX_RBBM_PERFCOUNTER0_SELECT },
-	{ 2, A3XX_RBBM_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_pc[] = {
-	{ 3, A3XX_PC_PERFCOUNTER0_SELECT },
-	{ 4, A3XX_PC_PERFCOUNTER1_SELECT },
-	{ 5, A3XX_PC_PERFCOUNTER2_SELECT },
-	{ 6, A3XX_PC_PERFCOUNTER3_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vfd[] = {
-	{ 7, A3XX_VFD_PERFCOUNTER0_SELECT },
-	{ 8, A3XX_VFD_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_hlsq[] = {
-	{ 9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
-	{ 10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
-	{ 11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
-	{ 12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
-	{ 13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
-	{ 14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vpc[] = {
-	{ 15, A3XX_VPC_PERFCOUNTER0_SELECT },
-	{ 16, A3XX_VPC_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tse[] = {
-	{ 17, A3XX_GRAS_PERFCOUNTER0_SELECT },
-	{ 18, A3XX_GRAS_PERFCOUNTER1_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_ras[] = {
-	{ 19, A3XX_GRAS_PERFCOUNTER2_SELECT },
-	{ 20, A3XX_GRAS_PERFCOUNTER3_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_uche[] = {
-	{ 21, A3XX_UCHE_PERFCOUNTER0_SELECT },
-	{ 22, A3XX_UCHE_PERFCOUNTER1_SELECT },
-	{ 23, A3XX_UCHE_PERFCOUNTER2_SELECT },
-	{ 24, A3XX_UCHE_PERFCOUNTER3_SELECT },
-	{ 25, A3XX_UCHE_PERFCOUNTER4_SELECT },
-	{ 26, A3XX_UCHE_PERFCOUNTER5_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tp[] = {
-	{ 27, A3XX_TP_PERFCOUNTER0_SELECT },
-	{ 28, A3XX_TP_PERFCOUNTER1_SELECT },
-	{ 29, A3XX_TP_PERFCOUNTER2_SELECT },
-	{ 30, A3XX_TP_PERFCOUNTER3_SELECT },
-	{ 31, A3XX_TP_PERFCOUNTER4_SELECT },
-	{ 32, A3XX_TP_PERFCOUNTER5_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_sp[] = {
-	{ 33, A3XX_SP_PERFCOUNTER0_SELECT },
-	{ 34, A3XX_SP_PERFCOUNTER1_SELECT },
-	{ 35, A3XX_SP_PERFCOUNTER2_SELECT },
-	{ 36, A3XX_SP_PERFCOUNTER3_SELECT },
-	{ 37, A3XX_SP_PERFCOUNTER4_SELECT },
-	{ 38, A3XX_SP_PERFCOUNTER5_SELECT },
-	{ 39, A3XX_SP_PERFCOUNTER6_SELECT },
-	{ 40, A3XX_SP_PERFCOUNTER7_SELECT },
-};
-
-static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rb[] = {
-	{ 41, A3XX_RB_PERFCOUNTER0_SELECT },
-	{ 42, A3XX_RB_PERFCOUNTER1_SELECT },
-};
-
-#define REGCOUNTER_GROUP(_x) { (_x), ARRAY_SIZE((_x)) }
-
-static struct {
-	struct a3xx_perfcounter_register *regs;
-	int count;
-} a3xx_perfcounter_reglist[] = {
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_cp),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_rbbm),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_pc),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_vfd),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_hlsq),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_vpc),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_tse),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_ras),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_uche),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_tp),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_sp),
-	REGCOUNTER_GROUP(a3xx_perfcounter_reg_rb),
-};
 
 static void a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
 	unsigned int countable)
@@ -3309,7 +3226,7 @@
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	unsigned int val = 0;
-	struct a3xx_perfcounter_register *reg;
+	struct adreno_perfcount_register *reg;
 
 	/* Special cases */
 	if (group == KGSL_PERFCOUNTER_GROUP_PWR)
@@ -3319,13 +3236,14 @@
 	else if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR)
 		return a3xx_perfcounter_enable_vbif_pwr(device, countable);
 
-	if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
+	if (group >= adreno_dev->gpudev->perfcounters->group_count)
 		return;
 
-	if (counter >= a3xx_perfcounter_reglist[group].count)
+	if (counter >=
+		adreno_dev->gpudev->perfcounters->groups[group].reg_count)
 		return;
 
-	reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
+	reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
 
 	/* Select the desired perfcounter */
 	kgsl_regwrite(device, reg->select, countable);
@@ -3340,27 +3258,29 @@
 }
 
 static uint64_t a3xx_perfcounter_read(struct adreno_device *adreno_dev,
-	unsigned int group, unsigned int counter,
-	unsigned int offset)
+	unsigned int group, unsigned int counter)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
-	struct a3xx_perfcounter_register *reg = NULL;
+	struct adreno_perfcount_register *reg;
 	unsigned int lo = 0, hi = 0;
 	unsigned int val;
+	unsigned int offset;
 
-	if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
+	if (group >= adreno_dev->gpudev->perfcounters->group_count)
 		return 0;
 
-	if (counter >= a3xx_perfcounter_reglist[group].count)
+	if (counter >=
+		adreno_dev->gpudev->perfcounters->groups[group].reg_count)
 		return 0;
 
-	reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
+	reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
 
 	/* Freeze the counter */
 	kgsl_regread(device, A3XX_RBBM_PERFCTR_CTL, &val);
 	val &= ~reg->load_bit;
 	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, val);
 
+	offset = reg->offset;
 	/* Read the values */
 	kgsl_regread(device, offset, &lo);
 	kgsl_regread(device, offset + 1, &hi);
@@ -3372,6 +3292,136 @@
 	return (((uint64_t) hi) << 32) | lo;
 }
 
+/*
+ * Return true if saved values can be loaded into the group's physical
+ * counters; the PWR, VBIF and VBIF_PWR groups cannot be loaded.
+ */
+static inline int loadable_perfcounter_group(unsigned int groupid)
+{
+	return ((groupid == KGSL_PERFCOUNTER_GROUP_VBIF_PWR) ||
+		(groupid == KGSL_PERFCOUNTER_GROUP_VBIF) ||
+		(groupid == KGSL_PERFCOUNTER_GROUP_PWR)) ? 0 : 1;
+}
+
+/*
+ * Return true if the countable is used and not broken
+ */
+static inline int active_countable(unsigned int countable)
+{
+	return ((countable != KGSL_PERFCOUNTER_NOT_USED) &&
+		(countable != KGSL_PERFCOUNTER_BROKEN));
+}
+
+/**
+ * a3xx_perfcounter_save() - Save the physical performance counter values
+ * @adreno_dev -  Adreno device whose counters need to be saved
+ *
+ * Read all the physical performance counters' values and save them
+ * before GPU power collapse.
+ */
+static void a3xx_perfcounter_save(struct adreno_device *adreno_dev)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	unsigned int regid, groupid;
+
+	for (groupid = 0; groupid < counters->group_count; groupid++) {
+		if (!loadable_perfcounter_group(groupid))
+			continue;
+
+		group = &(counters->groups[groupid]);
+
+		/* group/counter iterator */
+		for (regid = 0; regid < group->reg_count; regid++) {
+			if (!active_countable(group->regs[regid].countable))
+				continue;
+
+			group->regs[regid].value =
+				adreno_dev->gpudev->perfcounter_read(
+				adreno_dev, groupid, regid);
+		}
+	}
+}
+
+/**
+ * a3xx_perfcounter_write() - Write the physical performance counter values.
+ * @adreno_dev -  Adreno device whose registers are to be written to.
+ * @group - group to which the physical counter belongs.
+ * @counter - register id of the physical counter to which the value is
+ *		written.
+ *
+ * This function loads the 64 bit saved value into the particular physical
+ * counter by enabling the corresponding bit in A3XX_RBBM_PERFCTR_LOAD_CMD*
+ * register.
+ */
+static void a3xx_perfcounter_write(struct adreno_device *adreno_dev,
+				unsigned int group, unsigned int counter)
+{
+	struct kgsl_device *device = &(adreno_dev->dev);
+	struct adreno_perfcount_register *reg;
+	unsigned int val;
+
+	reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
+
+	/* Clear the load cmd registers */
+	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, 0);
+	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, 0);
+
+	/* Write the saved value to PERFCTR_LOAD_VALUE* registers. */
+	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_VALUE_LO,
+			(uint32_t)reg->value);
+	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_VALUE_HI,
+			(uint32_t)(reg->value >> 32));
+
+	/*
+	 * Set the load bit in PERFCTR_LOAD_CMD for the physical counter
+	 * we want to restore. The value in PERFCTR_LOAD_VALUE* is loaded
+	 * into the corresponding physical counter.
+	 */
+	if (reg->load_bit < 32)	{
+		val = 1 << reg->load_bit;
+		kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val);
+	} else {
+		val  = 1 << (reg->load_bit - 32);
+		kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val);
+	}
+}
+
+/**
+ * a3xx_perfcounter_restore() - Restore the physical performance counter values.
+ * @adreno_dev -  Adreno device whose registers are to be restored.
+ *
+ * This function, together with a3xx_perfcounter_save(), makes sure that
+ * performance counters are coherent across GPU power collapse.
+ */
+static void a3xx_perfcounter_restore(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	unsigned int regid, groupid;
+
+	for (groupid = 0; groupid < counters->group_count; groupid++) {
+		if (!loadable_perfcounter_group(groupid))
+			continue;
+
+		group = &(counters->groups[groupid]);
+
+		/* group/counter iterator */
+		for (regid = 0; regid < group->reg_count; regid++) {
+			if (!active_countable(group->regs[regid].countable))
+				continue;
+
+			a3xx_perfcounter_write(adreno_dev, groupid, regid);
+		}
+	}
+
+	/* Clear the load cmd registers */
+	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, 0);
+	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, 0);
+}
+
 #define A3XX_IRQ_CALLBACK(_c) { .func = _c }
 
 #define A3XX_INT_MASK \
@@ -3399,23 +3449,25 @@
 	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 5 - RBBM_ATB_BUS_OVERFLOW */
 	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 6 - RBBM_VFD_ERROR */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 7 - CP_SW */
-	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 8 - CP_T0_PACKET_IN_IB */
-	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 9 - CP_OPCODE_ERROR */
-	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 10 - CP_RESERVED_BIT_ERROR */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),/* 8 - CP_T0_PACKET_IN_IB */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),/* 9 - CP_OPCODE_ERROR */
+	/* 10 - CP_RESERVED_BIT_ERROR */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),
 	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 11 - CP_HW_FAULT */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 12 - CP_DMA */
 	A3XX_IRQ_CALLBACK(a3xx_cp_callback),   /* 13 - CP_IB2_INT */
 	A3XX_IRQ_CALLBACK(a3xx_cp_callback),   /* 14 - CP_IB1_INT */
 	A3XX_IRQ_CALLBACK(a3xx_cp_callback),   /* 15 - CP_RB_INT */
-	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 16 - CP_REG_PROTECT_FAULT */
+	/* 16 - CP_REG_PROTECT_FAULT */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),
 	A3XX_IRQ_CALLBACK(NULL),	       /* 17 - CP_RB_DONE_TS */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 18 - CP_VS_DONE_TS */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 19 - CP_PS_DONE_TS */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 20 - CP_CACHE_FLUSH_TS */
-	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 21 - CP_AHB_ERROR_FAULT */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),/* 21 - CP_AHB_ERROR_FAULT */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 22 - Unused */
 	A3XX_IRQ_CALLBACK(NULL),	       /* 23 - Unused */
-	A3XX_IRQ_CALLBACK(NULL),	       /* 24 - MISC_HANG_DETECT */
+	A3XX_IRQ_CALLBACK(a3xx_fatal_err_callback),/* 24 - MISC_HANG_DETECT */
 	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 25 - UCHE_OOB_ACCESS */
 	/* 26 to 31 - Unused */
 };
@@ -3451,14 +3503,27 @@
 	return ret;
 }
 
-static void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
+/**
+ * a3xx_irq_init() -  Routine to init a3xx irq
+ * @adreno_dev: adreno device ptr
+ */
+static void a3xx_irq_init(struct adreno_device *adreno_dev)
+{
+	if (adreno_hang_intr_supported(adreno_dev)) {
+		adreno_dev->intr_mask = (A3XX_INT_MASK |
+			(1 << A3XX_INT_MISC_HANG_DETECT));
+		adreno_dev->hang_intr_en = 1;
+	} else {
+		adreno_dev->intr_mask = A3XX_INT_MASK;
+	}
+}
+
+static void a3xx_irq_control(struct adreno_device *adreno_dev,
+							 unsigned int mask)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 
-	if (state)
-		kgsl_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
-	else
-		kgsl_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
+	kgsl_regwrite(device, A3XX_RBBM_INT_0_MASK, mask);
 }
 
 static unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
@@ -3624,118 +3689,160 @@
  */
 
 static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_CP_0_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_CP_0_LO,
+		0, A3XX_CP_PERFCOUNTER_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO,
+		1, A3XX_RBBM_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO,
+		2, A3XX_RBBM_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_2_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_3_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_0_LO,
+		3, A3XX_PC_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_1_LO,
+		4, A3XX_PC_PERFCOUNTER1_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_2_LO,
+		5, A3XX_PC_PERFCOUNTER2_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_3_LO,
+		6, A3XX_PC_PERFCOUNTER3_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_0_LO,
+		7, A3XX_VFD_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_1_LO,
+		8, A3XX_VFD_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO,
+		9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO,
+		10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO,
+		11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO,
+		12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO,
+		13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO,
+		14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_0_LO,
+		15, A3XX_VPC_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_1_LO,
+		16, A3XX_VPC_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_0_LO,
+		17, A3XX_GRAS_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_1_LO,
+		18, A3XX_GRAS_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_0_LO,
+		19, A3XX_GRAS_PERFCOUNTER2_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_1_LO,
+		20, A3XX_GRAS_PERFCOUNTER3_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO,
+		21, A3XX_UCHE_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO,
+		22, A3XX_UCHE_PERFCOUNTER1_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO,
+		23, A3XX_UCHE_PERFCOUNTER2_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO,
+		24, A3XX_UCHE_PERFCOUNTER3_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO,
+		25, A3XX_UCHE_PERFCOUNTER4_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO,
+		26, A3XX_UCHE_PERFCOUNTER5_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_2_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_3_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_4_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_5_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_0_LO,
+		27, A3XX_TP_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_1_LO,
+		28, A3XX_TP_PERFCOUNTER1_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_2_LO,
+		29, A3XX_TP_PERFCOUNTER2_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_3_LO,
+		30, A3XX_TP_PERFCOUNTER3_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_4_LO,
+		31, A3XX_TP_PERFCOUNTER4_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_5_LO,
+		32, A3XX_TP_PERFCOUNTER5_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_2_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_3_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_4_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_5_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_6_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_7_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_0_LO,
+		33, A3XX_SP_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_1_LO,
+		34, A3XX_SP_PERFCOUNTER1_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_2_LO,
+		35, A3XX_SP_PERFCOUNTER2_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_3_LO,
+		36, A3XX_SP_PERFCOUNTER3_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_4_LO,
+		37, A3XX_SP_PERFCOUNTER4_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_5_LO,
+		38, A3XX_SP_PERFCOUNTER5_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_6_LO,
+		39, A3XX_SP_PERFCOUNTER6_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_7_LO,
+		40, A3XX_SP_PERFCOUNTER7_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_0_LO,
+		41, A3XX_RB_PERFCOUNTER0_SELECT },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_1_LO,
+		42, A3XX_RB_PERFCOUNTER1_SELECT },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO,
+		-1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO,
+		-1, 0 },
 };
 
 static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO, -1, 0 },
 };
 static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO },
-	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO, -1, 0 },
 };
 
-#define A3XX_PERFCOUNTER_GROUP(name) { a3xx_perfcounters_##name, \
-	ARRAY_SIZE(a3xx_perfcounters_##name), __stringify(name) }
-
 static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
-	A3XX_PERFCOUNTER_GROUP(cp),
-	A3XX_PERFCOUNTER_GROUP(rbbm),
-	A3XX_PERFCOUNTER_GROUP(pc),
-	A3XX_PERFCOUNTER_GROUP(vfd),
-	A3XX_PERFCOUNTER_GROUP(hlsq),
-	A3XX_PERFCOUNTER_GROUP(vpc),
-	A3XX_PERFCOUNTER_GROUP(tse),
-	A3XX_PERFCOUNTER_GROUP(ras),
-	A3XX_PERFCOUNTER_GROUP(uche),
-	A3XX_PERFCOUNTER_GROUP(tp),
-	A3XX_PERFCOUNTER_GROUP(sp),
-	A3XX_PERFCOUNTER_GROUP(rb),
-	A3XX_PERFCOUNTER_GROUP(pwr),
-	A3XX_PERFCOUNTER_GROUP(vbif),
-	A3XX_PERFCOUNTER_GROUP(vbif_pwr),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, cp),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, rbbm),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, pc),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, vfd),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, hlsq),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, vpc),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, tse),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, ras),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, uche),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, tp),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, sp),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, rb),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, pwr),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, vbif),
+	ADRENO_PERFCOUNTER_GROUP(a3xx, vbif_pwr),
 };
 
 static struct adreno_perfcounters a3xx_perfcounters = {
@@ -3857,8 +3964,11 @@
 
 	/* Turn on hang detection - this spews a lot of useful information
 	 * into the RBBM registers on a hang */
-
-	kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
+	if (adreno_is_a330v2(adreno_dev))
+		kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
+			(1 << 31) | 0xFFFF);
+	else
+		kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
 			(1 << 16) | 0xFFF);
 
 	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0). */
@@ -4257,14 +4367,18 @@
 	.ctxt_draw_workaround = NULL,
 	.rb_init = a3xx_rb_init,
 	.perfcounter_init = a3xx_perfcounter_init,
+	.perfcounter_save = a3xx_perfcounter_save,
+	.perfcounter_restore = a3xx_perfcounter_restore,
 	.irq_control = a3xx_irq_control,
 	.irq_handler = a3xx_irq_handler,
 	.irq_pending = a3xx_irq_pending,
+	.irq_init = a3xx_irq_init,
 	.busy_cycles = a3xx_busy_cycles,
 	.start = a3xx_start,
 	.snapshot = a3xx_snapshot,
 	.perfcounter_enable = a3xx_perfcounter_enable,
 	.perfcounter_read = a3xx_perfcounter_read,
+	.perfcounter_write = a3xx_perfcounter_write,
 	.coresight_enable = a3xx_coresight_enable,
 	.coresight_disable = a3xx_coresight_disable,
 	.coresight_config_debug_reg = a3xx_coresight_config_debug_reg,
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index 32dbd51..b8c451e 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -406,9 +406,11 @@
 	const uint32_t *rb_vaddr;
 	int num_item = 0;
 	int read_idx, write_idx;
-	unsigned int ts_processed = 0xdeaddead;
+	unsigned int ts_processed = 0;
+	unsigned int curr_global_ts = 0;
 	struct kgsl_context *context;
 	unsigned int context_id;
+	static char pid_name[TASK_COMM_LEN] = "unknown";
 	unsigned int rbbm_status;
 
 	static struct ib_list ib_list;
@@ -459,8 +461,36 @@
 		adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
 		&cp_ib2_bufsz);
 
+	kgsl_sharedmem_readl(&device->memstore,
+			(unsigned int *) &context_id,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+				current_context));
+	context = idr_find(&device->context_idr, context_id);
+	if (context)
+		ts_processed = kgsl_readtimestamp(device, context,
+					  KGSL_TIMESTAMP_RETIRED);
+
 	/* If postmortem dump is not enabled, dump minimal set and return */
 	if (!device->pm_dump_enable) {
+		struct task_struct *task = NULL;
+		/* Read the current global timestamp here */
+		kgsl_sharedmem_readl(&device->memstore,
+			&curr_global_ts,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+			eoptimestamp));
+
+		if (context)
+			task = find_task_by_vpid(context->pid);
+
+		if (task)
+			get_task_comm(pid_name, task);
+
+		KGSL_LOG_DUMP(device,
+			"Proc %s, ctxt_id %d ts %d triggered fault tolerance"
+			" on global ts %d\n", pid_name,
+			context ? context->id : 0,
+			ts_processed ? ts_processed + 1 : 0,
+			curr_global_ts ? curr_global_ts + 1 : 0);
 
 		KGSL_LOG_DUMP(device,
 			"STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
@@ -471,19 +501,10 @@
 		return 0;
 	}
 
-	kgsl_sharedmem_readl(&device->memstore,
-			(unsigned int *) &context_id,
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-				current_context));
-
-	context = kgsl_context_get(device, context_id);
-
-	if (context) {
-		ts_processed = kgsl_readtimestamp(device, context,
-						  KGSL_TIMESTAMP_RETIRED);
+	if (ts_processed)
 		KGSL_LOG_DUMP(device, "FT CTXT: %d  TIMESTM RTRD: %08X\n",
 				context->id, ts_processed);
-	} else
+	else
 		KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
 
 	kgsl_context_put(context);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 8aec755..8871a23 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -69,6 +69,7 @@
 	unsigned long wait_time_part;
 	unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];
 	unsigned int rptr;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
 
 	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
@@ -107,17 +108,16 @@
 
 		/* Dont wait for timeout, detect hang faster.
 		 */
+
+		if (kgsl_atomic_read(&adreno_dev->hang_intr_set))
+			goto hang_detected;
+
 		if (time_after(jiffies, wait_time_part)) {
 			wait_time_part = jiffies +
 				msecs_to_jiffies(KGSL_TIMEOUT_PART);
-			if ((adreno_ft_detect(rb->device,
-						prev_reg_val))){
-				KGSL_DRV_ERR(rb->device,
-				"Hang detected while waiting for freespace in"
-				"ringbuffer rptr: 0x%x, wptr: 0x%x\n",
-				rptr, rb->wptr);
-				goto err;
-			}
+
+			if ((adreno_ft_detect(rb->device, prev_reg_val)))
+				goto hang_detected;
 		}
 
 		if (time_after(jiffies, wait_time)) {
@@ -129,6 +129,12 @@
 
 		continue;
 
+hang_detected:
+		KGSL_DRV_ERR(rb->device,
+			"Hang detected while waiting for freespace in"
+			"ringbuffer rptr: 0x%x, wptr: 0x%x\n",
+			rptr, rb->wptr);
+
 err:
 		if (!adreno_dump_and_exec_ft(rb->device)) {
 			if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 298b36e..6cd1f68 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -110,6 +110,29 @@
 }
 
 /**
+ * kgsl_hang_intr_work() - GPU hang interrupt work
+ * @work: work struct ptr
+ *
+ * This function is called when the GPU hang interrupt fires. It
+ * checks the device state and triggers fault tolerance.
+ */
+void kgsl_hang_intr_work(struct work_struct *work)
+{
+	struct kgsl_device *device = container_of(work, struct kgsl_device,
+							hang_intr_ws);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* If hang_intr_set is set, turn it off and trigger FT */
+	mutex_lock(&device->mutex);
+	if ((device->state == KGSL_STATE_ACTIVE) &&
+		(atomic_cmpxchg(&adreno_dev->hang_intr_set, 1, 0)))
+			adreno_dump_and_exec_ft(device);
+	mutex_unlock(&device->mutex);
+}
+
+/**
  * kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
  * device: KGSL device
  * id: ID of the context submitting the command
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 0525eab..42d3b3e 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -205,10 +205,25 @@
 #define MMU_CONFIG 1
 #endif
 
+void kgsl_hang_intr_work(struct work_struct *work);
 void kgsl_hang_check(struct work_struct *work);
 void kgsl_mem_entry_destroy(struct kref *kref);
 int kgsl_postmortem_dump(struct kgsl_device *device, int manual);
 
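+/*
+ * Barrier-ordered atomic helpers, used for flags such as hang_intr_set
+ * that are written from the interrupt/worker path and polled from the
+ * fault-tolerance and ringbuffer wait paths.
+ */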
+static inline void kgsl_atomic_set(atomic_t *addr, unsigned int val)
+{
+	atomic_set(addr, val);
+	/* make sure above write is posted */
+	wmb();
+}
+
+static inline int kgsl_atomic_read(atomic_t *addr)
+{
+	/* make sure the read below is fetched from memory */
+	rmb();
+	return atomic_read(addr);
+}
+
 struct kgsl_mem_entry *kgsl_get_mem_entry(struct kgsl_device *device,
 		phys_addr_t ptbase, unsigned int gpuaddr, unsigned int size);
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 0b5fe52..40e4e39 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -110,7 +110,7 @@
 		struct kgsl_pagetable *pagetable);
 	void (*power_stats)(struct kgsl_device *device,
 		struct kgsl_power_stats *stats);
-	void (*irqctrl)(struct kgsl_device *device, int state);
+	void (*irqctrl)(struct kgsl_device *device, unsigned int mask);
 	unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
 	void * (*snapshot)(struct kgsl_device *device, void *snapshot,
 		int *remain, int hang);
@@ -191,6 +191,7 @@
 	const struct kgsl_functable *ftbl;
 	struct work_struct idle_check_ws;
 	struct work_struct hang_check_ws;
+	struct work_struct hang_intr_ws;
 	struct timer_list idle_timer;
 	struct timer_list hang_timer;
 	struct kgsl_pwrctrl pwrctrl;
@@ -259,6 +260,8 @@
 			kgsl_idle_check),\
 	.hang_check_ws = __WORK_INITIALIZER((_dev).hang_check_ws,\
 			kgsl_hang_check),\
+	.hang_intr_ws = __WORK_INITIALIZER((_dev).hang_intr_ws,\
+			kgsl_hang_intr_work),\
 	.ts_expired_ws  = __WORK_INITIALIZER((_dev).ts_expired_ws,\
 			kgsl_process_events),\
 	.context_idr = IDR_INIT((_dev).context_idr),\
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index a06ebbf..acb3b17 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -344,6 +344,9 @@
 	device = mmu->device;
 	adreno_dev = ADRENO_DEVICE(device);
 
+	mmu->fault = 1;
+	iommu_dev->fault = 1;
+
 	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
 					iommu_dev->ctx_id, TTBR0);
 
@@ -394,9 +397,6 @@
 
 	}
 
-	mmu->fault = 1;
-	iommu_dev->fault = 1;
-
 	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
 
@@ -425,8 +425,10 @@
 	 * the GPU and trigger a snapshot. To stall the transaction return
 	 * EBUSY error.
 	 */
-	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
+		adreno_fatal_err_work(adreno_dev);
 		ret = -EBUSY;
+	}
 done:
 	return ret;
 }
@@ -1803,6 +1805,10 @@
 						iommu_unit,
 						iommu_unit->dev[j].ctx_id,
 						RESUME, 1);
+					KGSL_IOMMU_SET_CTX_REG(iommu,
+						iommu_unit,
+						iommu_unit->dev[j].ctx_id,
+						FSR, 0);
 					_iommu_unlock(iommu);
 					iommu_unit->dev[j].fault = 0;
 				}
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 103a751..ccb2e00 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -927,11 +927,11 @@
 	}
 }
 
-static void z180_irqctrl(struct kgsl_device *device, int state)
+static void z180_irqctrl(struct kgsl_device *device, unsigned int mask)
 {
 	/* Control interrupts for Z180 and the Z180 MMU */
 
-	if (state) {
+	if (mask) {
 		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
 		z180_regwrite(device, MH_INTERRUPT_MASK,
 			kgsl_mmu_get_int_mask());
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index c81aa0ac..84f81bf 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -956,7 +956,8 @@
 			__print_ctx_regs(drvdata->base, ctx_drvdata->num, fsr);
 		}
 
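+		/*
+		 * If the client fault handler stalled the transaction
+		 * (-EBUSY), leave FSR set; it is cleared later when the
+		 * stalled fault is resumed from the GPU fault-tolerance
+		 * path.
+		 */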
-		SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
+		if (ret != -EBUSY)
+			SET_FSR(drvdata->base, ctx_drvdata->num, fsr);
 		ret = IRQ_HANDLED;
 	} else
 		ret = IRQ_NONE;
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index cf6c6e2..8ae5671 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -3355,7 +3355,6 @@
 		if (led->default_on) {
 			led->cdev.brightness = led->cdev.max_brightness;
 			__qpnp_led_work(led, led->cdev.brightness);
-			schedule_work(&led->work);
 			if (led->turn_off_delay_ms > 0)
 				qpnp_led_turn_off(led);
 		} else
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 9d606a1..65387ba 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -3118,7 +3118,7 @@
 	struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
 
 	mutex_lock(&dvbdemux->mutex);
-
+	dvbdemux->sw_filter_abort = 0;
 	demux->frontend = NULL;
 	mutex_unlock(&dvbdemux->mutex);
 	return 0;
@@ -3203,6 +3203,7 @@
 		return -ENOMEM;
 	}
 
+	dvbdemux->sw_filter_abort = 0;
 	dvbdemux->total_process_time = 0;
 	dvbdemux->total_crc_time = 0;
 	snprintf(dvbdemux->alias,
diff --git a/drivers/media/platform/msm/camera_v1/msm_v4l2_video.c b/drivers/media/platform/msm/camera_v1/msm_v4l2_video.c
index 1849bf6..23e74ef 100644
--- a/drivers/media/platform/msm/camera_v1/msm_v4l2_video.c
+++ b/drivers/media/platform/msm/camera_v1/msm_v4l2_video.c
@@ -742,6 +742,9 @@
 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
 	u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + v4l2_ram_size);
 
+	if (!start)
+		return -EINVAL;
+
 	/*
 	 * This is probably unnecessary now - the last PAGE_SHIFT
 	 * bits of start should be 0 now, since we are page aligning
@@ -749,16 +752,20 @@
 	 */
 	start &= PAGE_MASK;
 
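+	/*
+	 * Bounds-check the request against the frame buffer; testing
+	 * (off >= len) before using (len - off) keeps the arithmetic from
+	 * wrapping, unlike the old (vm_end - vm_start + off) > len check.
+	 */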
+	if ((vma->vm_end <= vma->vm_start) ||
+	    (off >= len) ||
+	    ((vma->vm_end - vma->vm_start) > (len - off))) {
+		pr_err("v4l2 map request, memory requested out of bounds\n");
+		return -EINVAL;
+	}
+
 	pr_debug("v4l2 map req for phys(%p,%p) offset %u to virt (%p,%p)\n",
 	(void *)(start+off), (void *)(start+off+(vma->vm_end - vma->vm_start)),
 	(unsigned int)off, (void *)vma->vm_start, (void *)vma->vm_end);
 
-	if ((vma->vm_end - vma->vm_start + off) > len) {
-		pr_err("v4l2 map request, memory requested too big\n");
-		return -EINVAL;
-	}
-
 	start += off;
+	if (start < off)
+		return -EINVAL;
 	vma->vm_pgoff = start >> PAGE_SHIFT;
 	/* This is an IO map - tell maydump to skip this VMA */
 	vma->vm_flags |= VM_IO | VM_RESERVED;
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index 2bc460b..f6d1d51 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -4775,6 +4775,14 @@
 		todo = fill_count > limit ? limit : fill_count;
 		ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
 			read_offset);
+
+		if (mpq_demux->demux.sw_filter_abort) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: Demuxing from DVR was aborted\n",
+				__func__);
+			return -ENODEV;
+		}
+
 		if (ret > 0) {
 			total_bytes_read += ret;
 			fill_count -= ret;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e5315bd..743668b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2800,8 +2800,11 @@
  err_putdisk:
 	put_disk(md->disk);
  err_kfree:
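+	/* Release the name/device indices reserved earlier on this error path */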
+	if (!subname)
+		__clear_bit(md->name_idx, name_use);
 	kfree(md);
  out:
+	__clear_bit(devidx, dev_use);
 	return ERR_PTR(ret);
 }
 
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 5786190..95f0a04 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -6473,7 +6473,7 @@
 	host->debugfs_host_dir = NULL;
 }
 #else
-static void msmsdcc_remove_debugfs(msmsdcc_host *host) {}
+static void msmsdcc_remove_debugfs(struct msmsdcc_host *host) {}
 #endif
 
 static int msmsdcc_remove(struct platform_device *pdev)
@@ -6680,7 +6680,7 @@
 }
 #endif
 
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static void msmsdcc_print_pm_stats(struct msmsdcc_host *host, ktime_t start,
 				   const char *func, int err)
 {
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 2c8598b..b93eaf4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2310,6 +2310,7 @@
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 	struct mmc_ios	curr_ios = host->mmc->ios;
 	u32 sup_clock, ddr_clock;
+	bool curr_pwrsave;
 
 	if (!clock) {
 		sdhci_msm_prepare_clocks(host, false);
@@ -2321,6 +2322,22 @@
 	if (rc)
 		return;
 
+	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
+			  CORE_CLK_PWRSAVE);
+	if ((clock > 400000) &&
+	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+				| CORE_CLK_PWRSAVE,
+				host->ioaddr + CORE_VENDOR_SPEC);
+	/*
+	 * Disable pwrsave for a newly added card if it doesn't allow
+	 * clock gating.
+	 */
+	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+				& ~CORE_CLK_PWRSAVE,
+				host->ioaddr + CORE_VENDOR_SPEC);
+
 	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
 	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
 		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0cb0491..758a79e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -110,7 +110,7 @@
 		sdhci_readl(host, SDHCI_INT_ENABLE),
 		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
 	pr_info(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
-		sdhci_readw(host, SDHCI_AUTO_CMD_ERR),
+		host->auto_cmd_err_sts,
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
 	pr_info(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
@@ -2375,6 +2375,7 @@
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
+	host->auto_cmd_err_sts = 0;
 
 #ifndef SDHCI_USE_LEDS_CLASS
 	sdhci_deactivate_led(host);
@@ -2465,7 +2466,9 @@
 		host->cmd->error = -EILSEQ;
 
 	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
-		auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_ERR);
+		auto_cmd_status = host->auto_cmd_err_sts;
+		pr_err("%s: %s: AUTO CMD err sts 0x%08x\n",
+			mmc_hostname(host->mmc), __func__, auto_cmd_status);
 		if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
 				       SDHCI_AUTO_CMD_INDEX_ERR |
 				       SDHCI_AUTO_CMD_ENDBIT_ERR))
@@ -2728,6 +2731,9 @@
 	}
 
 	if (intmask & SDHCI_INT_CMD_MASK) {
+		if (intmask & SDHCI_INT_AUTO_CMD_ERR)
+			host->auto_cmd_err_sts = sdhci_readw(host,
+					SDHCI_AUTO_CMD_ERR);
 		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
 			SDHCI_INT_STATUS);
 		if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index 64ad940..8137ad8 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -1507,6 +1507,8 @@
 
 	if (chip->dc_present ^ dc_present) {
 		chip->dc_present = dc_present;
+		if (qpnp_chg_is_otg_en_set(chip))
+			qpnp_chg_force_run_on_batt(chip, !dc_present ? 1 : 0);
 		if (!dc_present && !qpnp_chg_is_usb_chg_plugged_in(chip)) {
 			chip->delta_vddmax_mv = 0;
 			qpnp_chg_set_appropriate_vddmax(chip);
@@ -1711,10 +1713,12 @@
 	if (qpnp_chg_is_otg_en_set(chip))
 		return 0;
 
-	rc = qpnp_chg_force_run_on_batt(chip, 1);
-	if (rc) {
-		pr_err("Failed to disable charging rc = %d\n", rc);
-		return rc;
+	if (!qpnp_chg_is_dc_chg_plugged_in(chip)) {
+		rc = qpnp_chg_force_run_on_batt(chip, 1);
+		if (rc) {
+			pr_err("Failed to disable charging rc = %d\n", rc);
+			return rc;
+		}
 	}
 
 	/* force usb ovp fet off */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 186fff1..08ca341 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -252,6 +252,7 @@
 	struct mutex ios_mutex;
 	enum sdhci_power_policy power_policy;
 
+	u32 auto_cmd_err_sts;
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* LINUX_MMC_SDHCI_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cfaaa69..0844dc3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -205,7 +205,16 @@
 #else
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
-	return alloc_pages(gfp, 0);
+	struct page *page;
+
+	page = alloc_pages(gfp, 0);
+
+	if (page && is_cma_pageblock(page)) {
+		__free_page(page);
+		page = alloc_pages(gfp & ~__GFP_MOVABLE, 0);
+	}
+
+	return page;
 }
 #endif
 
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index bedaba0..711f850 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -454,6 +454,15 @@
 
 			}
 
+			if (test_bit_pos(audio_ocmem_lcl.audio_state,
+					 OCMEM_STATE_DISABLE) ||
+			    test_bit_pos(audio_ocmem_lcl.audio_state,
+					 OCMEM_STATE_FREE)) {
+				pr_info("%s: audio already freed from ocmem, state[0x%x]\n",
+					__func__,
+					atomic_read(&audio_ocmem_lcl.audio_state));
+				goto fail_cmd2;
+			}
 			pr_debug("%s: calling ocmem free, state:0x%x\n",
 				__func__,
 				atomic_read(&audio_ocmem_lcl.audio_state));
@@ -491,7 +500,11 @@
 				ret);
 				goto fail_cmd2;
 			}
-			pr_debug("%s: ocmem_free success\n", __func__);
+			set_bit_pos(audio_ocmem_lcl.audio_state,
+					OCMEM_STATE_FREE);
+			pr_debug("%s: ocmem_free success, state[0x%x]\n",
+				 __func__,
+				 atomic_read(&audio_ocmem_lcl.audio_state));
 		/* Fall through */
 		case OCMEM_STATE_SSR:
 			msm_bus_scale_client_update_request(
@@ -517,6 +530,8 @@
 	ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
 	if (ret)
 		pr_err("%s: ocmem_free failed\n", __func__);
+	set_bit_pos(audio_ocmem_lcl.audio_state,
+		    OCMEM_STATE_FREE);
 fail_cmd2:
 	mutex_unlock(&audio_ocmem_lcl.state_process_lock);
 fail_cmd:
@@ -710,7 +725,8 @@
 			audio_ocmem_lcl.ocmem_en = true;
 	}
 
-	if (audio_ocmem_lcl.ocmem_en) {
+	if (audio_ocmem_lcl.ocmem_en &&
+	    (!enable || !audio_ocmem_lcl.audio_ocmem_running)) {
 		if (audio_ocmem_lcl.audio_ocmem_workqueue == NULL) {
 			pr_err("%s: audio ocmem workqueue is NULL\n",
 								__func__);
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
old mode 100644
new mode 100755
index aa6ef6b..a632644
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
@@ -51,6 +51,8 @@
 #define COMPRE_OUTPUT_METADATA_SIZE	(sizeof(struct output_meta_data_st))
 #define COMPRESSED_LR_VOL_MAX_STEPS	0x20002000
 
+#define MAX_AC3_PARAM_SIZE		(18*2*sizeof(int))
+
 const DECLARE_TLV_DB_LINEAR(compr_rx_vol_gain, 0,
 			    COMPRESSED_LR_VOL_MAX_STEPS);
 struct snd_msm {
@@ -977,19 +979,25 @@
 			compr->codec = FORMAT_MPEG4_AAC;
 			break;
 		case SND_AUDIOCODEC_AC3: {
-			char params_value[18*2*sizeof(int)];
+			char params_value[MAX_AC3_PARAM_SIZE];
 			int *params_value_data = (int *)params_value;
 			/* 36 is the max param length for ddp */
 			int i;
 			struct snd_dec_ddp *ddp =
 				&compr->info.codec_param.codec.options.ddp;
-			int params_length = ddp->params_length*sizeof(int);
+			uint32_t params_length = ddp->params_length*sizeof(int);
+			if (params_length > MAX_AC3_PARAM_SIZE) {
+				/* MAX is 36*sizeof(int); this should not happen */
+				pr_err("params_length(%u) is greater than %d\n",
+					params_length, MAX_AC3_PARAM_SIZE);
+				params_length = MAX_AC3_PARAM_SIZE;
+			}
 			pr_debug("SND_AUDIOCODEC_AC3\n");
 			compr->codec = FORMAT_AC3;
 			if (copy_from_user(params_value, (void *)ddp->params,
 					params_length))
-				pr_err("%s: ERROR: copy ddp params value\n",
-					__func__);
+				pr_err("%s: copy ddp params value, size=%u\n",
+					__func__, params_length);
 			pr_debug("params_length: %d\n", ddp->params_length);
 			for (i = 0; i < params_length; i++)
 				pr_debug("params_value[%d]: %x\n", i,
@@ -1008,19 +1016,25 @@
 			break;
 		}
 		case SND_AUDIOCODEC_EAC3: {
-			char params_value[18*2*sizeof(int)];
+			char params_value[MAX_AC3_PARAM_SIZE];
 			int *params_value_data = (int *)params_value;
 			/* 36 is the max param length for ddp */
 			int i;
 			struct snd_dec_ddp *ddp =
 				&compr->info.codec_param.codec.options.ddp;
-			int params_length = ddp->params_length*sizeof(int);
+			uint32_t params_length = ddp->params_length*sizeof(int);
+			if (params_length > MAX_AC3_PARAM_SIZE) {
+				/* MAX is 36*sizeof(int); this should not happen */
+				pr_err("params_length(%u) is greater than %d\n",
+					params_length, MAX_AC3_PARAM_SIZE);
+				params_length = MAX_AC3_PARAM_SIZE;
+			}
 			pr_debug("SND_AUDIOCODEC_EAC3\n");
 			compr->codec = FORMAT_EAC3;
 			if (copy_from_user(params_value, (void *)ddp->params,
 					params_length))
-				pr_err("%s: ERROR: copy ddp params value\n",
-					__func__);
+				pr_err("%s: copy ddp params value, size=%u\n",
+					__func__, params_length);
 			pr_debug("params_length: %d\n", ddp->params_length);
 			for (i = 0; i < ddp->params_length; i++)
 				pr_debug("params_value[%d]: %x\n", i,