Merge "msm: Add log collection ability on a watchdog reset after a kernel panic"
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index 5f7cc53..7ab8522 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -215,6 +215,9 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index 73ad361..809ed77 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -220,6 +220,9 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
diff --git a/arch/arm/configs/msm8974-perf_defconfig b/arch/arm/configs/msm8974-perf_defconfig
index 76a13f9..1148dc5 100644
--- a/arch/arm/configs/msm8974-perf_defconfig
+++ b/arch/arm/configs/msm8974-perf_defconfig
@@ -197,6 +197,9 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index aa2d236..029dec2 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -201,6 +201,9 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
diff --git a/arch/arm/configs/msm9615_defconfig b/arch/arm/configs/msm9615_defconfig
index 06ec01a..9ab1cdd 100644
--- a/arch/arm/configs/msm9615_defconfig
+++ b/arch/arm/configs/msm9615_defconfig
@@ -146,6 +146,9 @@
 CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 3806b47..19301fe 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -145,6 +145,9 @@
 CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
 CONFIG_NET_SCH_PRIO=y
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index b7ae5c9..a1ff607 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -473,6 +473,8 @@
 	.chg_term_ua			= CHG_TERM_MA * 1000,
 	.normal_voltage_calc_ms		= 20000,
 	.low_voltage_calc_ms		= 1000,
+	.alarm_low_mv			= 3400,
+	.alarm_high_mv			= 4000,
 };
 
 static struct pm8921_platform_data
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 2fb743b..82fa37b 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -2493,6 +2493,7 @@
 	.axi_addr = PCIE_AXI_BAR_PHYS,
 	.axi_size = PCIE_AXI_BAR_SIZE,
 	.wake_n = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, PCIE_EP_WAKE_N_PMIC_GPIO),
+	.vreg_n = 4
 };
 
 static int __init mpq8064_pcie_enabled(void)
diff --git a/arch/arm/mach-msm/board-8930-pmic.c b/arch/arm/mach-msm/board-8930-pmic.c
index 4fb5fe9..cd292e0 100644
--- a/arch/arm/mach-msm/board-8930-pmic.c
+++ b/arch/arm/mach-msm/board-8930-pmic.c
@@ -476,6 +476,8 @@
 	.rconn_mohm			= 18,
 	.normal_voltage_calc_ms		= 20000,
 	.low_voltage_calc_ms		= 1000,
+	.alarm_low_mv			= 3400,
+	.alarm_high_mv			= 4000,
 };
 
 static struct pm8038_platform_data pm8038_platform_data __devinitdata = {
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index f0ba1c9..8c16984 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -433,6 +433,8 @@
 	.chg_term_ua			= CHG_TERM_MA * 1000,
 	.normal_voltage_calc_ms		= 20000,
 	.low_voltage_calc_ms		= 1000,
+	.alarm_low_mv			= 3400,
+	.alarm_high_mv			= 4000,
 };
 
 #define	PM8921_LC_LED_MAX_CURRENT	4	/* I = 4mA */
diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h
index de30c65..93b77f4 100644
--- a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h
+++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppcmdi.h
@@ -15,7 +15,7 @@
 EXTERNALIZED FUNCTIONS
   None
 
-Copyright(c) 1992-2009, 2012 Code Aurora Forum. All rights reserved.
+Copyright(c) 1992-2009, 2012-2013 The Linux Foundation. All rights reserved.
 
 This software is licensed under the terms of the GNU General Public
 License version 2, as published by the Free Software Foundation, and
@@ -1071,6 +1071,9 @@
 } __packed;
 #define AUDPP_CMD_SAMPLING_FREQUENCY	7
 #define AUDPP_CMD_QRUMBLE		9
+#define AUDPP_CMD_SRS			18
+#define AUDPP_DISABLE_FEATS_LSW		2
+#define AUDPP_DISABLE_FEATS_MSW		3
 
 #endif /* QDSP5AUDPPCMDI_H */
 
diff --git a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h
index fef4c35..664e246 100644
--- a/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h
+++ b/arch/arm/mach-msm/include/mach/qdsp5/qdsp5audppmsg.h
@@ -2,29 +2,29 @@
 #define QDSP5AUDPPMSG_H
 
 /*====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*
-*
-*       Q D S P 5  A U D I O   P O S T   P R O C E S S I N G   M S G
-*
-* GENERAL DESCRIPTION
-*   Messages sent by AUDPPTASK to ARM
-*
-* REFERENCES
-*   None
-*
-* EXTERNALIZED FUNCTIONS
-*   None
-*
-* Copyright (c) 1992-2009, 2012 Code Aurora Forum. All rights reserved.
-*
-* This software is licensed under the terms of the GNU General Public
-* License version 2, as published by the Free Software Foundation, and
-* may be copied, distributed, and modified under those terms.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
+
+       Q D S P 5  A U D I O   P O S T   P R O C E S S I N G   M S G
+
+ GENERAL DESCRIPTION
+   Messages sent by AUDPPTASK to ARM
+
+ REFERENCES
+   None
+
+ EXTERNALIZED FUNCTIONS
+   None
+
+ Copyright (c) 1992-2009, 2012-2013 The Linux Foundation. All rights reserved.
+
+ This software is licensed under the terms of the GNU General Public
+ License version 2, as published by the Free Software Foundation, and
+ may be copied, distributed, and modified under those terms.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ GNU General Public License for more details.
+
 *====*====*====*====*====*====*====*====*====*====*====*====*====*====*====*/
 /*===========================================================================
 
@@ -319,4 +319,18 @@
 #define ADSP_MESSAGE_ID 0xFFFF
 
 #define AUDPP_MSG_FEAT_QUERY_DM_DONE 0x000b
+
+/*
+ * ADSP sends this message when a PP feature is disabled
+ * due to ADSP resource limitation.
+ */
+#define AUDPP_MSG_PP_DISABLE_FEEDBACK 0x000C
+
+/*
+ * This message is sent by ADSP when PP features that were disabled
+ * due to audio/video concurrency (a MIPS limitation) can be re-enabled
+ * because the video session in ADSP has ended.
+ */
+#define AUDPP_MSG_PP_FEATS_RE_ENABLE 0x000D
+
 #endif /* QDSP5AUDPPMSG_H */
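
Illustrative sketch (not part of the patch): the two message IDs added above carry a feature bitmask split across two 16-bit message words, indexed by the AUDPP_DISABLE_FEATS_LSW/MSW defines added to qdsp5audppcmdi.h, with one bit per AUDPP_CMD_* feature ID. A minimal client-side decode, mirroring what audio_acdb.c and audio_out.c do later in this patch, could look like this:

/* Sketch only: assumes the AUDPP_* defines from the qdsp5 headers above. */
static uint32_t decode_disabled_feats(const uint16_t *msg)
{
	/* msg[2]/msg[3] hold the LSW/MSW of the disabled-feature bitmask */
	return ((uint32_t)msg[AUDPP_DISABLE_FEATS_MSW] << 16) |
		msg[AUDPP_DISABLE_FEATS_LSW];
}

/* Example: test whether MBADRC was switched off by the DSP */
static int mbadrc_was_disabled(uint32_t feats)
{
	return (feats & (1 << AUDPP_CMD_MBADRC)) != 0;
}
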
diff --git a/arch/arm/mach-msm/qdsp5/audio_acdb.c b/arch/arm/mach-msm/qdsp5/audio_acdb.c
index 681b41c..7819395 100644
--- a/arch/arm/mach-msm/qdsp5/audio_acdb.c
+++ b/arch/arm/mach-msm/qdsp5/audio_acdb.c
@@ -97,11 +97,13 @@
 	u32 device_cb_compl;
 	u32 audpp_cb_compl;
 	u32 preproc_cb_compl;
+	u32 audpp_cb_reenable_compl;
 	u8 preproc_stream_id;
 	u8 audrec_applied;
 	u32 multiple_sessions;
 	u32 cur_tx_session;
 	struct acdb_result acdb_result;
+	uint32_t audpp_disabled_features;
 
 	spinlock_t dsp_lock;
 	int dec_id;
@@ -1550,6 +1552,40 @@
 	return result;
 }
 
+static s32 acdb_re_enable_audpp(void)
+{
+	s32	result = 0;
+
+	if ((acdb_data.audpp_disabled_features &
+			(1 << AUDPP_CMD_IIR_TUNING_FILTER))
+			== (1 << AUDPP_CMD_IIR_TUNING_FILTER)) {
+		result = audpp_dsp_set_rx_iir(COMMON_OBJ_ID,
+				acdb_data.pp_iir->active_flag,
+				acdb_data.pp_iir);
+		if (result) {
+			MM_ERR("ACDB=> Failed to send IIR data to postproc\n");
+			result = -EINVAL;
+		} else {
+			MM_DBG("Re-enable IIR parameters");
+		}
+	}
+	if ((acdb_data.audpp_disabled_features & (1 << AUDPP_CMD_MBADRC))
+			== (1 << AUDPP_CMD_MBADRC)) {
+		result = audpp_dsp_set_mbadrc(COMMON_OBJ_ID,
+				acdb_data.pp_mbadrc->enable,
+				acdb_data.pp_mbadrc);
+		if (result) {
+			MM_ERR("ACDB=> Failed to send MBADRC data to"\
+					" postproc\n");
+			result = -EINVAL;
+		} else {
+			MM_DBG("Re-enable MBADRC parameters");
+		}
+	}
+	acdb_data.audpp_disabled_features = 0;
+	return result;
+}
+
 static struct acdb_agc_block *get_audpreproc_agc_block(void)
 {
 	struct header *prs_hdr;
@@ -2311,6 +2347,22 @@
 static void audpp_cb(void *private, u32 id, u16 *msg)
 {
 	MM_DBG("\n");
+
+	if (id == AUDPP_MSG_PP_DISABLE_FEEDBACK) {
+		acdb_data.audpp_disabled_features |=
+			((uint32_t)(msg[AUDPP_DISABLE_FEATS_MSW] << 16) |
+			 msg[AUDPP_DISABLE_FEATS_LSW]);
+		MM_INFO("AUDPP disable feedback: %x",
+				acdb_data.audpp_disabled_features);
+		goto done;
+	} else if (id == AUDPP_MSG_PP_FEATS_RE_ENABLE) {
+		MM_INFO("AUDPP re-enable message: %x",
+				acdb_data.audpp_disabled_features);
+		acdb_data.audpp_cb_reenable_compl = 1;
+		wake_up(&acdb_data.wait);
+		return;
+	}
+
 	if (id != AUDPP_MSG_CFG_MSG)
 		goto done;
 
@@ -2504,6 +2556,7 @@
 		wait_event_interruptible(acdb_data.wait,
 					(acdb_data.device_cb_compl
 					| acdb_data.audpp_cb_compl
+					| acdb_data.audpp_cb_reenable_compl
 					| acdb_data.preproc_cb_compl));
 		mutex_lock(&acdb_data.acdb_mutex);
 		if (acdb_data.device_cb_compl) {
@@ -2534,6 +2587,11 @@
 			if (acdb_data.device_info->dev_type.tx_device)
 				handle_tx_device_ready_callback();
 			else {
+				if (acdb_data.audpp_cb_reenable_compl) {
+					MM_INFO("Reset disabled feature flag");
+					acdb_data.audpp_disabled_features = 0;
+					acdb_data.audpp_cb_reenable_compl = 0;
+				}
 				acdb_cache_rx.node_status =\
 						ACDB_VALUES_FILLED;
 				if (acdb_data.acdb_state &
@@ -2546,6 +2604,7 @@
 		}
 
 		if (!(acdb_data.audpp_cb_compl ||
+				acdb_data.audpp_cb_reenable_compl ||
 				acdb_data.preproc_cb_compl)) {
 			MM_DBG("need to wait for either AUDPP / AUDPREPROC "\
 					"Event\n");
@@ -2554,10 +2613,21 @@
 		} else {
 			MM_DBG("got audpp / preproc call back\n");
 			if (acdb_data.audpp_cb_compl) {
+				if (acdb_data.audpp_cb_reenable_compl) {
+					MM_INFO("Reset disabled feature flag");
+					acdb_data.audpp_disabled_features = 0;
+					acdb_data.audpp_cb_reenable_compl = 0;
+				}
 				send_acdb_values_for_active_devices();
 				acdb_data.audpp_cb_compl = 0;
 				mutex_unlock(&acdb_data.acdb_mutex);
 				continue;
+			} else if (acdb_data.audpp_cb_reenable_compl) {
+				acdb_re_enable_audpp();
+				acdb_data.audpp_disabled_features = 0;
+				acdb_data.audpp_cb_reenable_compl = 0;
+				mutex_unlock(&acdb_data.acdb_mutex);
+				continue;
 			} else {
 				result = handle_audpreproc_cb();
 				if (result < 0) {
diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c
index ceb73f0..07f9f4c 100644
--- a/arch/arm/mach-msm/qdsp5/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5/audio_out.c
@@ -189,6 +189,8 @@
 	int srs_needs_commit;
 	int srs_feature_mask;
 	audpp_cmd_cfg_object_params_qconcert qconcert_plus;
+	int srs_current_feature_mask;
+	uint32_t audpp_disabled_features;
 
 	int status;
 	int opened;
@@ -298,12 +300,75 @@
 		return;
 	}
 
-	if (AUDPP_MSG_CFG_MSG == id && msg[0] == AUDPP_MSG_ENA_DIS)
+	if (AUDPP_MSG_CFG_MSG == id && msg[0] == AUDPP_MSG_ENA_DIS) {
+		audio_copp->audpp_disabled_features = 0;
 		return;
+	}
+	if (AUDPP_MSG_CFG_MSG == id && msg[0] == AUDPP_MSG_ENA_ENA)
+		audio_copp->audpp_disabled_features = 0;
 
 	if (!audio_copp->status)
 		return;
 
+	if (id == AUDPP_MSG_PP_DISABLE_FEEDBACK) {
+		audio_copp->audpp_disabled_features |=
+			((uint32_t)(msg[AUDPP_DISABLE_FEATS_MSW] << 16) |
+			 msg[AUDPP_DISABLE_FEATS_LSW]);
+		MM_DBG("AUDPP disable feedback: %x",
+				audio_copp->audpp_disabled_features);
+		return;
+	} else if (id == AUDPP_MSG_PP_FEATS_RE_ENABLE) {
+		MM_DBG("AUDPP re-enable message: %x, acdb_enabled %d",
+			audio_copp->audpp_disabled_features, is_acdb_enabled());
+		if (!is_acdb_enabled()) {
+			if ((audio_copp->audpp_disabled_features &
+				(1 << AUDPP_CMD_MBADRC)) ==
+				(1 << AUDPP_CMD_MBADRC)) {
+				audpp_dsp_set_mbadrc(COMMON_OBJ_ID,
+						audio_copp->mbadrc_enable,
+						&audio_copp->mbadrc);
+			}
+			if ((audio_copp->audpp_disabled_features &
+				(1 << AUDPP_CMD_EQUALIZER)) ==
+				(1 << AUDPP_CMD_EQUALIZER)) {
+				audpp_dsp_set_eq(COMMON_OBJ_ID,
+						audio_copp->eq_enable,
+						&audio_copp->eq);
+			}
+			if ((audio_copp->audpp_disabled_features &
+				(1 << AUDPP_CMD_IIR_TUNING_FILTER)) ==
+				(1 << AUDPP_CMD_IIR_TUNING_FILTER)) {
+				audpp_dsp_set_rx_iir(COMMON_OBJ_ID,
+						audio_copp->rx_iir_enable,
+						&audio_copp->iir);
+			}
+			if ((audio_copp->audpp_disabled_features &
+				(1 << AUDPP_CMD_QCONCERT)) ==
+					(1 << AUDPP_CMD_QCONCERT)) {
+				audpp_dsp_set_qconcert_plus(COMMON_OBJ_ID,
+					audio_copp->qconcert_plus_enable,
+					&audio_copp->qconcert_plus);
+			}
+		}
+		if ((audio_copp->audpp_disabled_features & (1 << AUDPP_CMD_SRS))
+			== (1 << AUDPP_CMD_SRS)) {
+			if (audio_copp->srs_current_feature_mask & SRS_MASK_W)
+				audpp_dsp_set_rx_srs_trumedia_w(&audio_copp->w);
+			if (audio_copp->srs_current_feature_mask & SRS_MASK_C)
+				audpp_dsp_set_rx_srs_trumedia_c(&audio_copp->c);
+			if (audio_copp->srs_current_feature_mask & SRS_MASK_HP)
+				audpp_dsp_set_rx_srs_trumedia_h(&audio_copp->h);
+			if (audio_copp->srs_current_feature_mask & SRS_MASK_P)
+				audpp_dsp_set_rx_srs_trumedia_p(&audio_copp->p);
+			if (audio_copp->srs_current_feature_mask & SRS_MASK_HL)
+				audpp_dsp_set_rx_srs_trumedia_l(&audio_copp->l);
+			if (audio_copp->srs_current_feature_mask & SRS_MASK_G)
+				audpp_dsp_set_rx_srs_trumedia_g(&audio_copp->g);
+		}
+		audio_copp->audpp_disabled_features = 0;
+		return;
+	}
+
 	if (!is_acdb_enabled()) {
 		audpp_dsp_set_mbadrc(COMMON_OBJ_ID, audio_copp->mbadrc_enable,
 						&audio_copp->mbadrc);
@@ -513,6 +578,8 @@
 		if (audio_copp->srs_feature_mask & SRS_MASK_G)
 			audpp_dsp_set_rx_srs_trumedia_g(&audio_copp->g);
 
+		audio_copp->srs_current_feature_mask =
+			audio_copp->srs_feature_mask;
 		audio_copp->srs_needs_commit = 0;
 		audio_copp->srs_feature_mask = 0;
 	}
diff --git a/arch/arm/mach-msm/qdsp5/audpp.c b/arch/arm/mach-msm/qdsp5/audpp.c
index 2a83238..bcc00a4 100644
--- a/arch/arm/mach-msm/qdsp5/audpp.c
+++ b/arch/arm/mach-msm/qdsp5/audpp.c
@@ -211,9 +211,13 @@
 			    uint16_t *msg)
 {
 	unsigned n;
-	for (n = 0; n < AUDPP_CLNT_MAX_COUNT; n++) {
-		if (audpp->func[n])
-			audpp->func[n] (audpp->private[n], id, msg);
+
+	if ((id != AUDPP_MSG_PP_DISABLE_FEEDBACK) &&
+		(id != AUDPP_MSG_PP_FEATS_RE_ENABLE)) {
+		for (n = 0; n < AUDPP_CLNT_MAX_COUNT; n++) {
+			if (audpp->func[n])
+				audpp->func[n] (audpp->private[n], id, msg);
+		}
 	}
 
 	for (n = 0; n < MAX_EVENT_CALLBACK_CLIENTS; ++n)
@@ -337,6 +341,14 @@
 			msg[1], msg[2]);
 		acdb_rtc_set_err(msg[2]);
 		break;
+	case AUDPP_MSG_PP_DISABLE_FEEDBACK:
+		MM_DBG("PP Disable feedback due to MIPS limitation");
+		audpp_broadcast(audpp, id, msg);
+		break;
+	case AUDPP_MSG_PP_FEATS_RE_ENABLE:
+		MM_DBG("Re-enable the disabled PP features");
+		audpp_broadcast(audpp, id, msg);
+		break;
 	default:
 		MM_ERR("unhandled msg id %x\n", id);
 	}
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 78efb03..81409b0 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -31,6 +31,7 @@
 #include <linux/swap.h>
 #include <linux/mm_types.h>
 #include <linux/dma-contiguous.h>
+#include <trace/events/kmem.h>
 
 #ifndef SZ_1M
 #define SZ_1M (1 << 20)
@@ -316,6 +317,7 @@
 	unsigned long mask, pfn, pageno, start = 0;
 	struct cma *cma = dev_get_cma_area(dev);
 	int ret;
+	int tries = 0;
 
 	if (!cma || !cma->count)
 		return NULL;
@@ -349,6 +351,9 @@
 		} else if (ret != -EBUSY) {
 			goto error;
 		}
+		tries++;
+		trace_dma_alloc_contiguous_retry(tries);
+
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
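
The hunk above only adds the call site; trace_dma_alloc_contiguous_retry() itself is presumably declared in include/trace/events/kmem.h by a companion change that is not part of this diff. A minimal sketch of what such a tracepoint declaration typically looks like (illustrative only; the real definition may differ):

/* Sketch only: a one-field tracepoint recording the retry count. */
TRACE_EVENT(dma_alloc_contiguous_retry,
	TP_PROTO(int tries),
	TP_ARGS(tries),
	TP_STRUCT__entry(
		__field(int, tries)
	),
	TP_fast_assign(
		__entry->tries = tries;
	),
	TP_printk("tries=%d", __entry->tries)
);
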
diff --git a/drivers/gpio/pm8xxx-gpio.c b/drivers/gpio/pm8xxx-gpio.c
index 92f697f..a972714 100644
--- a/drivers/gpio/pm8xxx-gpio.c
+++ b/drivers/gpio/pm8xxx-gpio.c
@@ -1,7 +1,4 @@
-/*
- * Qualcomm PMIC8XXX GPIO driver
- *
- * Copyright (c) 2011-2013, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index b3df752..7ef8c15 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -34,6 +34,8 @@
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
 #include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
 
 #include <mach/iommu_domains.h>
 #include "ion_priv.h"
@@ -440,9 +442,16 @@
 		if (secure_allocation &&
 			(heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP))
 			continue;
+		trace_ion_alloc_buffer_start(client->name, heap->name, len,
+					     heap_mask, flags);
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		trace_ion_alloc_buffer_end(client->name, heap->name, len,
+					   heap_mask, flags);
 		if (!IS_ERR_OR_NULL(buffer))
 			break;
+
+		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
+					    heap_mask, flags, PTR_ERR(buffer));
 		if (dbg_str_idx < MAX_DBG_STR_LEN) {
 			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
 			int ret_value = snprintf(&dbg_str[dbg_str_idx],
@@ -461,10 +470,15 @@
 	}
 	mutex_unlock(&dev->lock);
 
-	if (buffer == NULL)
+	if (buffer == NULL) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_mask, flags, -ENODEV);
 		return ERR_PTR(-ENODEV);
+	}
 
 	if (IS_ERR(buffer)) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_mask, flags, PTR_ERR(buffer));
 		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
 			 "0x%x) from heap(s) %sfor client %s with heap "
 			 "mask 0x%x\n",
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 16b07e8..4649606 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -29,6 +29,7 @@
 #include <linux/fmem.h>
 #include <linux/iommu.h>
 #include <linux/dma-mapping.h>
+#include <trace/events/kmem.h>
 
 #include <asm/mach/map.h>
 
@@ -162,8 +163,10 @@
 						&(cp_heap->handle),
 						0,
 						&attrs);
-		if (!cp_heap->cpu_addr)
+		if (!cp_heap->cpu_addr) {
+			trace_ion_cp_alloc_retry(tries);
 			msleep(20);
+		}
 	}
 
 	if (!cp_heap->cpu_addr)
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index cb8fbed..d43f19f 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -82,8 +82,10 @@
 		for_each_sg(table->sgl, sg, table->nents, i) {
 			data->pages[i] = alloc_page(
 				GFP_KERNEL | __GFP_HIGHMEM);
-			if (!data->pages[i])
+			if (!data->pages[i]) {
+				ret = -ENOMEM;
 				goto err3;
+			}
 
 			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
 			sg_dma_address(sg) = sg_phys(sg);
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index e2bab8a..31d15aa 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -2720,10 +2720,13 @@
 		return -EINVAL;
 
 	list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) {
-		if ((feed->pid == pid) && (feed->ts != NULL)) {
-			feed->ts->stop_filtering(feed->ts);
-			filter->dev->demux->release_ts_feed(filter->dev->demux,
-							    feed->ts);
+		if (feed->pid == pid) {
+			if (feed->ts != NULL) {
+				feed->ts->stop_filtering(feed->ts);
+				filter->dev->demux->release_ts_feed(
+							filter->dev->demux,
+							feed->ts);
+			}
 			list_del(&feed->next);
 			kfree(feed);
 		}
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
index 2ca48b9..c687930 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_common.c
@@ -1975,7 +1975,15 @@
 }
 EXPORT_SYMBOL(mpq_dmx_decoder_fullness_init);
 
-
+/**
+ * Returns whether the free space in the decoder's output
+ * buffer is larger than a specific number of bytes.
+ *
+ * @sbuff: MPQ stream buffer used for decoder data.
+ * @required_space: number of required free bytes in the buffer
+ *
+ * Return 1 if the required free space is available, 0 otherwise.
+ */
 static inline int mpq_dmx_check_decoder_fullness(
 	struct mpq_streambuffer *sbuff,
 	size_t required_space)
@@ -1995,16 +2003,30 @@
 		return (free >= required_space);
 }
 
-int mpq_dmx_decoder_fullness_wait(
+/**
+ * Checks whether the decoder's output buffer has free space
+ * for a specific number of bytes; if not, the function waits
+ * until that amount of free space is available.
+ *
+ * @feed: decoder's feed object
+ * @required_space: number of required free bytes in the buffer
+ * @lock_feed: indicates whether the mutex should be acquired before
+ * accessing the feed information. Set to 0 if the caller already
+ * holds the demux mutex, and to 1 otherwise.
+ *
+ * Return 0 if the required space is available, or an error code
+ * if waiting on buffer fullness was aborted.
+ */
+static int mpq_dmx_decoder_fullness_check(
 		struct dvb_demux_feed *feed,
-		size_t required_space)
+		size_t required_space,
+		int lock_feed)
 {
 	struct mpq_demux *mpq_demux = feed->demux->priv;
 	struct mpq_streambuffer *sbuff = NULL;
 	struct mpq_video_feed_info *feed_data;
 	struct mpq_feed *mpq_feed;
 	int ret = 0;
-	int was_locked;
 
 	if (!mpq_dmx_is_video_feed(feed)) {
 		MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
@@ -2013,11 +2035,13 @@
 		return -EINVAL;
 	}
 
-	if (mutex_is_locked(&mpq_demux->mutex)) {
-		was_locked = 1;
-	} else {
+	if (lock_feed) {
 		mutex_lock(&mpq_demux->mutex);
-		was_locked = 0;
+	} else if (!mutex_is_locked(&mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
 	}
 
 	mpq_feed = feed->priv;
@@ -2025,7 +2049,7 @@
 
 	sbuff = feed_data->video_buffer;
 	if (sbuff == NULL) {
-		if (!was_locked)
+		if (lock_feed)
 			mutex_unlock(&mpq_demux->mutex);
 		MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
 			__func__);
@@ -2060,22 +2084,29 @@
 	}
 
 	if (ret < 0) {
-		if (!was_locked)
+		if (lock_feed)
 			mutex_unlock(&mpq_demux->mutex);
 		return ret;
 	}
 
 	if ((feed_data->fullness_wait_cancel) ||
 		(feed_data->video_buffer == NULL)) {
-		if (!was_locked)
+		if (lock_feed)
 			mutex_unlock(&mpq_demux->mutex);
 		return -EINVAL;
 	}
 
-	if (!was_locked)
+	if (lock_feed)
 		mutex_unlock(&mpq_demux->mutex);
 	return 0;
 }
+
+int mpq_dmx_decoder_fullness_wait(
+		struct dvb_demux_feed *feed,
+		size_t required_space)
+{
+	return mpq_dmx_decoder_fullness_check(feed, required_space, 1);
+}
 EXPORT_SYMBOL(mpq_dmx_decoder_fullness_wait);
 
 int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
@@ -3695,6 +3726,13 @@
 	u8 tmp;
 	int i;
 
+	if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
 		tmp = DVB_RINGBUFFER_PEEK(&mpq_feed->sdmx_buf, i);
 		xor = f->filter.filter_value[i] ^ tmp;
@@ -3709,20 +3747,12 @@
 		return 0;
 
 	if (feed->demux->playback_mode == DMX_PB_MODE_PULL) {
-		int was_locked;
-
-		if (mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
-			mutex_unlock(&mpq_feed->mpq_demux->mutex);
-			was_locked = 1;
-		} else {
-			was_locked = 0;
-		}
+		mutex_unlock(&mpq_feed->mpq_demux->mutex);
 
 		ret = feed->demux->buffer_ctrl.sec(&f->filter,
 					header->payload_length);
 
-		if (was_locked)
-			mutex_lock(&mpq_feed->mpq_demux->mutex);
+		mutex_lock(&mpq_feed->mpq_demux->mutex);
 
 		if (ret) {
 			MPQ_DVB_DBG_PRINT(
@@ -3757,7 +3787,13 @@
 {
 	struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
 	int ret;
-	int was_locked;
+
+	if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+		MPQ_DVB_ERR_PRINT(
+				"%s: Mutex should have been locked\n",
+				__func__);
+		return -EINVAL;
+	}
 
 	/*
 	 * For PULL mode need to verify there is enough space for the dmxdev
@@ -3769,19 +3805,13 @@
 		MPQ_DVB_DBG_PRINT("%s: Stalling for events and %d bytes\n",
 			__func__, req);
 
-		if (mutex_is_locked(&mpq_demux->mutex)) {
-			mutex_unlock(&mpq_demux->mutex);
-			was_locked = 1;
-		} else {
-			was_locked = 0;
-		}
+		mutex_unlock(&mpq_demux->mutex);
 
 		ret = mpq_demux->demux.buffer_ctrl.ts(&feed->feed.ts, req);
 		MPQ_DVB_DBG_PRINT("%s: stall result = %d\n",
 			__func__, ret);
 
-		if (was_locked)
-			mutex_lock(&mpq_demux->mutex);
+		mutex_lock(&mpq_demux->mutex);
 
 		return ret;
 	}
@@ -3971,12 +4001,12 @@
 		(sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
 		MPQ_DVB_DBG_PRINT("%s: Decoder stall...\n", __func__);
 
-		ret = mpq_dmx_decoder_fullness_wait(
-			mpq_feed->dvb_demux_feed, 0);
+		ret = mpq_dmx_decoder_fullness_check(
+			mpq_feed->dvb_demux_feed, 0, 0);
 		if (ret) {
 			/* we reach here if demuxing was aborted */
 			MPQ_DVB_DBG_PRINT(
-				"%s: mpq_dmx_decoder_fullness_wait aborted\n",
+				"%s: mpq_dmx_decoder_fullness_check aborted\n",
 				__func__);
 			return;
 		}
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index 7442094..49f87ba 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -20,10 +20,20 @@
 
 #define TSIF_COUNT			2
 
-#define TSPP_MAX_PID_FILTER_NUM		16
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM		128
+
+/* Max number of user-defined HW PID filters */
+#define TSPP_MAX_HW_PID_FILTER_NUM	15
+
+/* HW index of the last entry in the TSPP HW filter table */
+#define TSPP_LAST_HW_FILTER_INDEX	15
+
+/* Number of filters required to accept all packets except NULL packets */
+#define TSPP_BLOCK_NULLS_FILTERS_NUM	13
 
 /* Max number of section filters */
-#define TSPP_MAX_SECTION_FILTER_NUM	64
+#define TSPP_MAX_SECTION_FILTER_NUM	128
 
 /* For each TSIF we use a single pipe holding the data after PID filtering */
 #define TSPP_CHANNEL			0
@@ -38,6 +48,9 @@
 /* dvb-demux defines pid 0x2000 as full capture pid */
 #define TSPP_PASS_THROUGH_PID		0x2000
 
+/* NULL packets pid */
+#define TSPP_NULL_PACKETS_PID		0x1FFF
+
 #define TSPP_RAW_TTS_SIZE		192
 #define TSPP_RAW_SIZE			188
 
@@ -111,9 +124,10 @@
 		/* TSPP data buffer heap physical base address */
 		ion_phys_addr_t ch_mem_heap_phys_base;
 
-		/* buffer allocation index */
+		/* Buffer allocation index */
 		int buff_index;
 
+		/* Number of buffers */
 		u32 buffer_count;
 
 		/*
@@ -124,15 +138,43 @@
 		int *aggregate_ids;
 
 		/*
-		 * Holds PIDs of allocated TSPP filters along with
-		 * how many feeds are opened on same PID.
+		 * Holds PIDs of allocated filters along with
+		 * how many feeds are opened on the same PID. For
+		 * TSPP HW filters, it also holds the filter table index.
+		 * When pid == -1, the entry is free.
 		 */
 		struct {
 			int pid;
 			int ref_count;
+			int hw_index;
 		} filters[TSPP_MAX_PID_FILTER_NUM];
 
-		/* thread processing TS packets from TSPP */
+		/* Indicates available/allocated filter table indexes */
+		int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM];
+
+		/* Number of currently allocated PID filters */
+		u16 current_filter_count;
+
+		/*
+		 * Flag to indicate whether the user added a filter to accept
+		 * NULL packets (PID = 0x1FFF)
+		 */
+		int pass_nulls_flag;
+
+		/*
+		 * Flag to indicate whether the user added a filter to accept
+		 * all packets (PID = 0x2000)
+		 */
+		int pass_all_flag;
+
+		/*
+		 * Flag to indicate whether the filter that accepts
+		 * all packets has already been added and is
+		 * currently enabled
+		 */
+		int accept_all_filter_exists_flag;
+
+		/* Thread processing TS packets from TSPP */
 		struct task_struct *thread;
 		wait_queue_head_t wait_queue;
 
@@ -142,7 +184,7 @@
 		/* Pointer to the demux connected to this TSIF */
 		struct mpq_demux *mpq_demux;
 
-		/* mutex protecting the data-structure */
+		/* Mutex protecting the data-structure */
 		struct mutex mutex;
 	} tsif[TSIF_COUNT];
 
@@ -192,20 +234,54 @@
 }
 
 /**
- * Returns a free filter slot that can be used.
+ * Returns a free HW filter index that can be used.
  *
  * @tsif: The TSIF to allocate filter from
- * @channel_id: The channel allocating filter to
  *
- * Return  filter index or -1 if no filters available
+ * Return  HW filter index or -ENOMEM if no filters available
  */
-static int mpq_tspp_get_free_filter_slot(int tsif, int channel_id)
+static int mpq_tspp_allocate_hw_filter_index(int tsif)
 {
 	int i;
 
-	for (i = 0; i < TSPP_MAX_PID_FILTER_NUM; i++)
-		if (mpq_dmx_tspp_info.tsif[tsif].filters[i].pid == -1)
+	for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) {
+			mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1;
 			return i;
+		}
+	}
+
+	return -ENOMEM;
+}
+
+/**
+ * Releases a HW filter index for future reuse.
+ *
+ * @tsif: The TSIF from which the filter should be released
+ * @hw_index: The HW index to release
+ *
+ */
+static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index)
+{
+	if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM))
+		mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0;
+}
+
+
+/**
+ * Returns a free filter slot that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return  filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_get_free_filter_slot(int tsif)
+{
+	int slot;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			return slot;
 
 	return -ENOMEM;
 }
@@ -220,11 +296,11 @@
  */
 static int mpq_tspp_get_filter_slot(int tsif, int pid)
 {
-	int i;
+	int slot;
 
-	for (i = 0; i < TSPP_MAX_PID_FILTER_NUM; i++)
-		if (mpq_dmx_tspp_info.tsif[tsif].filters[i].pid == pid)
-			return i;
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid)
+			return slot;
 
 	return -EINVAL;
 }
@@ -477,6 +553,328 @@
 }
 
 /**
+ * Add a filter to accept all packets as the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  error status
+ */
+static int mpq_tspp_add_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n",
+				__func__);
+		return 0;
+	}
+
+	/* This filter will be the last entry in the table */
+	tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+	/* Pass all pids - set mask to 0 */
+	tspp_filter.pid = 0;
+	tspp_filter.mask = 0;
+	/*
+	 * Include TTS in RAW packets, if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	ret = tspp_add_filter(0, channel_id, &tspp_filter);
+	if (!ret) {
+		mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1;
+		MPQ_DVB_DBG_PRINT(
+				"%s: accept all filter added successfully\n",
+				__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Remove the filter that accepts all packets from the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  error status
+ */
+static int mpq_tspp_remove_accept_all_filter(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag == 0) {
+		MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n",
+				__func__);
+		return 0;
+	}
+
+	tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+
+	ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+	if (!ret) {
+		mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0;
+		MPQ_DVB_DBG_PRINT(
+			"%s: accept all filter removed successfully\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Add filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ * This function is called after user-defined filters were removed,
+ * so it assumes that the first 13 HW filters in the TSPP filter
+ * table are free for use.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int ret = 0;
+	int i, j;
+	u16 full_pid_mask = 0x1FFF;
+	u8 mask_shift;
+	u8 pid_shift;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	/*
+	 * Add a total of 13 filters that will accept packets with
+	 * every PID other than 0x1FFF, which is the NULL PID.
+	 *
+	 * Filter 0: accept all PIDs with bit 12 clear, i.e.
+	 * PID = 0x0000 .. 0x0FFF (4096 PIDs in total):
+	 * Mask = 0x1000, PID = 0x0000.
+	 *
+	 * Filter 12: Accept PID 0x1FFE:
+	 * Mask = 0x1FFF, PID = 0x1FFE.
+	 *
+	 * In general: For N = 0 .. 12,
+	 * Filter <N>: accept PIDs with <N> MSBits set and the next bit clear.
+	 * Filter <N> Mask = N+1 MSBits set, others clear.
+	 * Filter <N> PID = <N> MSBits set, others clear.
+	 */
+
+	/*
+	 * Include TTS in RAW packets, if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (tspp_filter.priority != i) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: got unexpected HW index %d, expected %d\n",
+				__func__, tspp_filter.priority, i);
+			ret = -1;
+			break;
+		}
+		mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i);
+		pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i);
+		tspp_filter.mask =
+			((full_pid_mask >> mask_shift) << mask_shift);
+		tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift);
+
+		if (tspp_add_filter(0, channel_id, &tspp_filter)) {
+			ret = -1;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* cleanup on failure */
+		for (j = 0; j < i; j++) {
+			tspp_filter.priority = j;
+			mpq_tspp_release_hw_filter_index(tsif, j);
+			tspp_remove_filter(0, channel_id, &tspp_filter);
+		}
+	} else {
+		MPQ_DVB_DBG_PRINT(
+			"%s: NULL blocking filters added successfully\n",
+			__func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Remove filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_null_blocking_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int ret = 0;
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+		__func__, channel_id, source);
+
+	for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+		tspp_filter.priority = i;
+		if (tspp_remove_filter(0, channel_id, &tspp_filter)) {
+			MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n",
+				__func__, i);
+			ret = -1;
+		}
+
+		mpq_tspp_release_hw_filter_index(tsif, i);
+	}
+
+	return ret;
+}
+
+/**
+ * Add all current user-defined filters (up to 15) as HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int slot;
+	u16 added_count = 0;
+	u16 total_filters_count = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	/*
+	 * Include TTS in RAW packets, if you change this to
+	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+	 * accordingly.
+	 */
+	tspp_filter.mode = TSPP_MODE_RAW;
+	tspp_filter.source = source;
+	tspp_filter.decrypt = 0;
+
+	for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) {
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+			continue;
+
+		/*
+		 * count total number of user filters to verify that it is
+		 * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected.
+		 */
+		total_filters_count++;
+
+		if (added_count > TSPP_MAX_HW_PID_FILTER_NUM)
+			continue;
+
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+
+		if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid ==
+				TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid =
+				mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n",
+				__func__, tspp_filter.pid, tspp_filter.mask,
+				tspp_filter.priority);
+
+		if (!tspp_add_filter(0, channel_id, &tspp_filter)) {
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+				tspp_filter.priority;
+			added_count++;
+		} else {
+			MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n",
+						__func__);
+		}
+	}
+
+	if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) ||
+		(added_count != total_filters_count))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Remove all user-defined HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return  0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_all_user_filters(int channel_id,
+				enum tspp_source source)
+{
+	struct tspp_filter tspp_filter;
+	int ret = 0;
+	int tsif = TSPP_GET_TSIF_NUM(channel_id);
+	int i;
+
+	MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+	for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+		tspp_filter.priority = i;
+		MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n",
+			__func__, tspp_filter.priority);
+		if (tspp_remove_filter(0, channel_id, &tspp_filter))
+			ret = -1;
+
+		mpq_tspp_release_hw_filter_index(tsif, i);
+		mpq_dmx_tspp_info.tsif[tsif].filters[i].hw_index = -1;
+	}
+
+	return ret;
+}
+
+/**
  * Configure TSPP channel to filter the PID of new feed.
  *
  * @feed: The feed to configure the channel with
@@ -492,16 +890,22 @@
 	struct tspp_select_source tspp_source;
 	struct tspp_filter tspp_filter;
 	int tsif;
-	int ret;
+	int ret = 0;
+	int slot;
 	int channel_id;
 	int *channel_ref_count;
 	u32 buffer_size;
+	int restore_user_filters = 0;
+	int remove_accept_all_filter = 0;
+	int remove_null_blocking_filters = 0;
 
 	tspp_source.clk_inverse = clock_inv;
 	tspp_source.data_inverse = 0;
 	tspp_source.sync_inverse = 0;
 	tspp_source.enable_inverse = 0;
 
+	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
 	switch (tsif_mode) {
 	case 1:
 		tspp_source.mode = TSPP_TSIF_MODE_1;
@@ -539,10 +943,10 @@
 	 * Can happen if we play and record same PES or PCR
 	 * piggypacked on video packet.
 	 */
-	ret = mpq_tspp_get_filter_slot(tsif, feed->pid);
-	if (ret >= 0) {
+	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+	if (slot >= 0) {
 		/* PID already configured */
-		mpq_dmx_tspp_info.tsif[tsif].filters[ret].ref_count++;
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
 		mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
 		return 0;
 	}
@@ -627,64 +1031,196 @@
 	}
 
 	/* add new PID to the existing pipe */
-	ret = mpq_tspp_get_free_filter_slot(tsif, channel_id);
-	if (ret < 0) {
+	slot = mpq_tspp_get_free_filter_slot(tsif);
+	if (slot < 0) {
 		MPQ_DVB_ERR_PRINT(
-			"%s: mpq_allocate_filter_slot(%d, %d) failed\n",
-			__func__,
-			tsif,
-			channel_id);
+			"%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
+			__func__, tsif);
 
 		goto add_channel_unregister_notif;
 	}
 
-	mpq_dmx_tspp_info.tsif[tsif].filters[ret].pid = feed->pid;
-	mpq_dmx_tspp_info.tsif[tsif].filters[ret].ref_count++;
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
 
-	tspp_filter.priority = ret;
-	if (feed->pid == TSPP_PASS_THROUGH_PID) {
-		/* pass all pids */
-		tspp_filter.pid = 0;
-		tspp_filter.mask = 0;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+	tspp_filter.priority = -1;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* HW filtering mode */
+		tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+		if (tspp_filter.priority < 0)
+			goto add_channel_free_filter_slot;
+
+		if (feed->pid == TSPP_PASS_THROUGH_PID) {
+			/* pass all pids */
+			tspp_filter.pid = 0;
+			tspp_filter.mask = 0;
+		} else {
+			tspp_filter.pid = feed->pid;
+			tspp_filter.mask = TSPP_PID_MASK;
+		}
+
+		/*
+		 * Include TTS in RAW packets, if you change this to
+		 * TSPP_MODE_RAW_NO_SUFFIX you must also change
+		 * TSPP_RAW_TTS_SIZE accordingly.
+		 */
+		tspp_filter.mode = TSPP_MODE_RAW;
+		tspp_filter.source = tspp_source.source;
+		tspp_filter.decrypt = 0;
+		ret = tspp_add_filter(0, channel_id, &tspp_filter);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_add_filter(%d) failed (%d)\n",
+				__func__,
+				channel_id,
+				ret);
+
+			goto add_channel_free_filter_slot;
+		}
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+			tspp_filter.priority;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
+			__func__, tspp_filter.pid, tspp_filter.mask,
+			tspp_filter.priority);
+	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* Crossing the threshold - from HW to SW filtering mode */
+
+		/* Add a temporary filter to accept all packets */
+		ret = mpq_tspp_add_accept_all_filter(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Remove all existing user filters */
+		ret = mpq_tspp_remove_all_user_filters(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			restore_user_filters = 1;
+			remove_accept_all_filter = 1;
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Add HW filters to block NULL packets */
+		ret = mpq_tspp_add_null_blocking_filters(channel_id,
+					tspp_source.source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source.source);
+
+			restore_user_filters = 1;
+			remove_accept_all_filter = 1;
+
+			goto add_channel_free_filter_slot;
+		}
+
+		/* Remove the filter that accepts all packets, if necessary */
+		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+			ret = mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source.source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source.source);
+
+				remove_null_blocking_filters = 1;
+				restore_user_filters = 1;
+				remove_accept_all_filter = 1;
+
+				goto add_channel_free_filter_slot;
+			}
+		}
 	} else {
-		tspp_filter.pid = feed->pid;
-		tspp_filter.mask = TSPP_PID_MASK;
-	}
+		/* Already working in SW filtering mode */
+		if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
+			mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {
 
-	/*
-	 * Include TTS in RAW packets, if you change this to
-	 * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
-	 * accordingly.
-	 */
-	tspp_filter.mode = TSPP_MODE_RAW;
-	tspp_filter.source = tspp_source.source;
-	tspp_filter.decrypt = 0;
-	ret = tspp_add_filter(0, channel_id, &tspp_filter);
-	if (ret < 0) {
-		MPQ_DVB_ERR_PRINT(
-			"%s: tspp_add_filter(%d) failed (%d)\n",
-			__func__,
-			channel_id,
-			ret);
+			ret = mpq_tspp_add_accept_all_filter(channel_id,
+						tspp_source.source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source.source);
 
-		goto add_channel_free_filter_slot;
+				goto add_channel_free_filter_slot;
+			}
+		}
 	}
 
 	(*channel_ref_count)++;
+	mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;
+
+	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
 
 	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
 	return 0;
 
 add_channel_free_filter_slot:
-	mpq_dmx_tspp_info.tsif[tsif].filters[tspp_filter.priority].pid = -1;
-	mpq_dmx_tspp_info.tsif[tsif].filters[tspp_filter.priority].ref_count--;
+	/* restore internal database state */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+	/* release HW index if we allocated one */
+	if (tspp_filter.priority >= 0) {
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+	}
+
+	/* restore HW filter table state if necessary */
+	if (remove_null_blocking_filters)
+		mpq_tspp_remove_null_blocking_filters(channel_id,
+						tspp_source.source);
+
+	if (restore_user_filters)
+		mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);
+
+	if (remove_accept_all_filter)
+		mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source.source);
+
+	/* restore flags. we can only get here if we changed the flags. */
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
 add_channel_unregister_notif:
-	tspp_unregister_notification(0, channel_id);
+	if (*channel_ref_count == 0) {
+		tspp_unregister_notification(0, channel_id);
+		tspp_close_stream(0, channel_id);
+	}
 add_channel_close_ch:
-	tspp_close_channel(0, channel_id);
+	if (*channel_ref_count == 0)
+		tspp_close_channel(0, channel_id);
 add_channel_failed:
-	if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC)
-		mpq_dmx_channel_mem_free(tsif);
+	if (*channel_ref_count == 0)
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC)
+			mpq_dmx_channel_mem_free(tsif);
 
 	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
 	return ret;
@@ -705,16 +1241,26 @@
 	int tsif;
 	int ret;
 	int channel_id;
+	int slot;
 	atomic_t *data_cnt;
 	int *channel_ref_count;
+	enum tspp_source tspp_source;
 	struct tspp_filter tspp_filter;
 	struct mpq_demux *mpq_demux = feed->demux->priv;
+	int restore_null_blocking_filters = 0;
+	int remove_accept_all_filter = 0;
+	int remove_user_filters = 0;
+	int accept_all_filter_existed = 0;
+
+	MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
 
 	/* determine the TSIF we are reading from */
 	if (mpq_demux->source == DMX_SOURCE_FRONT0) {
 		tsif = 0;
+		tspp_source = TSPP_SOURCE_TSIF0;
 	} else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
 		tsif = 1;
+		tspp_source = TSPP_SOURCE_TSIF1;
 	} else {
 		/* invalid source */
 		MPQ_DVB_ERR_PRINT(
@@ -744,9 +1290,9 @@
 		goto remove_channel_failed;
 	}
 
-	tspp_filter.priority = mpq_tspp_get_filter_slot(tsif, feed->pid);
+	slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
 
-	if (tspp_filter.priority < 0) {
+	if (slot < 0) {
 		/* invalid feed provided as it has no filter allocated */
 		MPQ_DVB_ERR_PRINT(
 			"%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
@@ -758,10 +1304,10 @@
 		goto remove_channel_failed;
 	}
 
-	mpq_dmx_tspp_info.tsif[tsif].filters[tspp_filter.priority].ref_count--;
+	/* since filter was found, ref_count > 0 so it's ok to decrement it */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
 
-	if (mpq_dmx_tspp_info.tsif[tsif].
-		filters[tspp_filter.priority].ref_count) {
+	if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
 		/*
 		 * there are still references to this pid, do not
 		 * remove the filter yet
@@ -770,21 +1316,120 @@
 		return 0;
 	}
 
-	ret = tspp_remove_filter(0, channel_id, &tspp_filter);
-	if (ret < 0) {
-		/* invalid feed provided as it has no filter allocated */
-		MPQ_DVB_ERR_PRINT(
-			"%s: tspp_remove_filter failed (%d,%d)\n",
-			__func__,
-			channel_id,
-			tspp_filter.priority);
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
 
-		goto remove_channel_failed_restore_count;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+
+	if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
+					TSPP_MAX_HW_PID_FILTER_NUM) {
+		/* staying in HW filtering mode */
+		tspp_filter.priority =
+			mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
+		ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: tspp_remove_filter failed (%d,%d)\n",
+				__func__,
+				channel_id,
+				tspp_filter.priority);
+
+			goto remove_channel_failed_restore_count;
+		}
+		mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+		mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+
+		MPQ_DVB_DBG_PRINT(
+			"%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
+			__func__, feed->pid, tspp_filter.priority);
+	} else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+					(TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
+		/* Crossing the threshold - from SW to HW filtering mode */
+
+		accept_all_filter_existed =
+			mpq_dmx_tspp_info.tsif[tsif].
+				accept_all_filter_exists_flag;
+
+		/* Add a temporary filter to accept all packets */
+		ret = mpq_tspp_add_accept_all_filter(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_remove_null_blocking_filters(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_add_all_user_filters(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			remove_user_filters = 1;
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+
+		ret = mpq_tspp_remove_accept_all_filter(channel_id,
+					tspp_source);
+		if (ret < 0) {
+			MPQ_DVB_ERR_PRINT(
+				"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+				__func__, channel_id, tspp_source);
+
+			remove_user_filters = 1;
+			restore_null_blocking_filters = 1;
+			if (!accept_all_filter_existed)
+				remove_accept_all_filter = 1;
+
+			goto remove_channel_failed_restore_count;
+		}
+	} else {
+		/* staying in SW filtering mode */
+		if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+			(mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+			ret = mpq_tspp_remove_accept_all_filter(channel_id,
+						tspp_source);
+			if (ret < 0) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+					__func__, channel_id,
+					tspp_source);
+
+				goto remove_channel_failed_restore_count;
+			}
+		}
 	}
 
-	mpq_dmx_tspp_info.tsif[tsif].filters[tspp_filter.priority].pid = -1;
+	mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
 	(*channel_ref_count)--;
 
+	MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+		__func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
 	if (*channel_ref_count == 0) {
 		/* channel is not used any more, release it */
 		tspp_unregister_notification(0, channel_id);
@@ -800,7 +1445,24 @@
 	return 0;
 
 remove_channel_failed_restore_count:
-	mpq_dmx_tspp_info.tsif[tsif].filters[tspp_filter.priority].ref_count++;
+	/* restore internal database state */
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+	mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+	if (remove_user_filters)
+		mpq_tspp_remove_all_user_filters(channel_id, tspp_source);
+
+	if (restore_null_blocking_filters)
+		mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);
+
+	if (remove_accept_all_filter)
+		mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);
+
+	/* restore flags. we can only get here if we changed the flags. */
+	if (feed->pid == TSPP_PASS_THROUGH_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+	else if (feed->pid == TSPP_NULL_PACKETS_PID)
+		mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
 
 remove_channel_failed:
 	mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
@@ -1130,8 +1792,17 @@
 		for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
 			mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
 			mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
+			mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1;
 		}
 
+		for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++)
+			mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0;
+
+		mpq_dmx_tspp_info.tsif[i].current_filter_count = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0;
+		mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0;
+
 		snprintf(mpq_dmx_tspp_info.tsif[i].name,
 				TSIF_NAME_LENGTH,
 				"dmx_tsif%d",
diff --git a/drivers/power/pm8921-bms.c b/drivers/power/pm8921-bms.c
index 1050af7..4a95780 100644
--- a/drivers/power/pm8921-bms.c
+++ b/drivers/power/pm8921-bms.c
@@ -23,6 +23,7 @@
 #include <linux/mfd/pm8xxx/pm8921-charger.h>
 #include <linux/mfd/pm8xxx/ccadc.h>
 #include <linux/mfd/pm8xxx/batterydata-lib.h>
+#include <linux/mfd/pm8xxx/batt-alarm.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/debugfs.h>
@@ -120,6 +121,9 @@
 	unsigned int		charging_began;
 	unsigned int		start_percent;
 	unsigned int		end_percent;
+	unsigned int		alarm_low_mv;
+	unsigned int		alarm_high_mv;
+
 	int			charge_time_us;
 	int			catch_up_time_us;
 	enum battery_type	batt_type;
@@ -192,6 +196,13 @@
 static int last_real_fcc_mah = -EINVAL;
 static int last_real_fcc_batt_temp = -EINVAL;
 
+static int pm8921_battery_gauge_alarm_notify(struct notifier_block *nb,
+				unsigned long status, void *unused);
+
+static struct notifier_block alarm_notifier = {
+	.notifier_call = pm8921_battery_gauge_alarm_notify,
+};
+
 static int bms_ops_set(const char *val, const struct kernel_param *kp)
 {
 	if (*(int *)kp->arg == -EINVAL)
@@ -379,6 +390,124 @@
 	return val;
 }
 
+static int pm8921_bms_enable_batt_alarm(struct pm8921_bms_chip *chip)
+{
+	int rc = 0;
+
+	rc = pm8xxx_batt_alarm_enable(PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+	if (!rc)
+		rc = pm8xxx_batt_alarm_disable(
+				PM8XXX_BATT_ALARM_UPPER_COMPARATOR);
+	if (rc) {
+		pr_err("unable to set batt alarm state rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
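+/*
+ * Program the PMIC battery alarm block: both comparators start disabled,
+ * the thresholds come from platform data (alarm_low_mv / alarm_high_mv),
+ * and the BMS registers for threshold-crossing notifications.
+ */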
+static int pm8921_bms_configure_batt_alarm(struct pm8921_bms_chip *chip)
+{
+	int rc = 0;
+
+	rc = pm8xxx_batt_alarm_disable(PM8XXX_BATT_ALARM_UPPER_COMPARATOR);
+	if (!rc)
+		rc = pm8xxx_batt_alarm_disable(
+			PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+	if (rc) {
+		pr_err("unable to set batt alarm state rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * The batt-alarm driver requires sane values for both min / max,
+	 * regardless of whether they're both activated.
+	 */
+	rc = pm8xxx_batt_alarm_threshold_set(
+			PM8XXX_BATT_ALARM_LOWER_COMPARATOR,
+					chip->alarm_low_mv);
+	if (!rc)
+		rc = pm8xxx_batt_alarm_threshold_set(
+			PM8XXX_BATT_ALARM_UPPER_COMPARATOR,
+					chip->alarm_high_mv);
+	if (rc) {
+		pr_err("unable to set batt alarm threshold rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pm8xxx_batt_alarm_hold_time_set(
+			PM8XXX_BATT_ALARM_HOLD_TIME_16_MS);
+	if (rc) {
+		pr_err("unable to set batt alarm hold time rc=%d\n", rc);
+		return rc;
+	}
+
+	/* PWM enabled at 2Hz */
+	rc = pm8xxx_batt_alarm_pwm_rate_set(1, 7, 4);
+	if (rc) {
+		pr_err("unable to set batt alarm pwm rate rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pm8xxx_batt_alarm_register_notifier(&alarm_notifier);
+	if (rc) {
+		pr_err("unable to register alarm notifier rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int pm8921_battery_gauge_alarm_notify(struct notifier_block *nb,
+		unsigned long status, void *unused)
+{
+	int rc;
+
+	if (!the_chip) {
+		pr_err("not initialized\n");
+		return -EINVAL;
+	}
+
+	switch (status) {
+	case 0:
+		pr_debug("spurious interrupt\n");
+		break;
+	case 1:
+		pr_debug("Low voltage alarm triggered\n");
+		/*
+		 * hold the low voltage wakelock until the soc
+		 * work finds it appropriate to release it.
+		 */
+		wake_lock(&the_chip->low_voltage_wake_lock);
+		the_chip->low_voltage_wake_lock_held = 1;
+
+		rc = pm8xxx_batt_alarm_disable(
+				PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+		if (!rc)
+			rc = pm8xxx_batt_alarm_enable(
+				PM8XXX_BATT_ALARM_UPPER_COMPARATOR);
+		if (rc)
+			pr_err("unable to set alarm state rc=%d\n", rc);
+		break;
+	case 2:
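+		/*
+		 * High-voltage alarm: stop monitoring the upper comparator
+		 * and re-arm the lower one so the next low-voltage crossing
+		 * is caught.
+		 */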
+		rc = pm8xxx_batt_alarm_disable(
+			PM8XXX_BATT_ALARM_UPPER_COMPARATOR);
+		if (!rc)
+			rc = pm8xxx_batt_alarm_enable(
+				PM8XXX_BATT_ALARM_LOWER_COMPARATOR);
+		if (rc)
+			pr_err("unable to set alarm state rc=%d\n", rc);
+
+		break;
+	default:
+		pr_err("error received\n");
+		break;
+	}
+
+	return 0;
+}
+
 #define HOLD_OREG_DATA		BIT(1)
 static int pm_bms_lock_output_data(struct pm8921_bms_chip *chip)
 {
@@ -599,6 +728,23 @@
 	return 0;
 }
 
+static int get_batt_temp(struct pm8921_bms_chip *chip, int *batt_temp)
+{
+	int rc;
+	struct pm8xxx_adc_chan_result result;
+
+	rc = pm8xxx_adc_read(chip->batt_temp_channel, &result);
+	if (rc) {
+		pr_err("error reading batt_temp_channel = %d, rc = %d\n",
+					chip->batt_temp_channel, rc);
+		return rc;
+	}
+	*batt_temp = result.physical;
+	pr_debug("batt_temp phy = %lld meas = 0x%llx\n", result.physical,
+						result.measurement);
+	return 0;
+}
+
 #define BMS_MODE_BIT	BIT(6)
 #define EN_VBAT_BIT	BIT(5)
 #define OVERRIDE_MODE_DELAY_MS	20
@@ -609,6 +755,7 @@
 	int16_t vbat_raw;
 	int vsense_uv;
 	int usb_chg;
+	int batt_temp;
 
 	mutex_lock(&the_chip->bms_output_lock);
 
@@ -623,12 +770,13 @@
 	pm_bms_read_output_data(the_chip, VBATT_AVG, &vbat_raw);
 	pm_bms_unlock_output_data(the_chip);
 	pm_bms_masked_write(the_chip, BMS_CONTROL,
-			BMS_MODE_BIT | EN_VBAT_BIT, 0);
+		BMS_MODE_BIT | EN_VBAT_BIT, 0);
 
 	pm8xxx_writeb(the_chip->dev->parent, BMS_S1_DELAY, 0x0B);
 
 	mutex_unlock(&the_chip->bms_output_lock);
 
+	get_batt_temp(the_chip, &batt_temp);
 	usb_chg = usb_chg_plugged_in(the_chip);
 
 	convert_vbatt_raw_to_uv(the_chip, usb_chg, vbat_raw, vbat_uv);
@@ -1599,6 +1747,7 @@
 static void very_low_voltage_check(struct pm8921_bms_chip *chip,
 					int ibat_ua, int vbat_uv)
 {
+	int rc;
 	/*
 	 * if battery is very low (v_cutoff voltage + 20mv) hold
 	 * a wakelock untill soc = 0%
@@ -1617,6 +1766,9 @@
 		chip->low_voltage_wake_lock_held = 0;
 		wake_unlock(&chip->low_voltage_wake_lock);
 		chip->soc_calc_period = chip->normal_voltage_calc_ms;
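+		/* voltage recovered: re-enable the low-voltage battery alarm */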
+		rc = pm8921_bms_enable_batt_alarm(chip);
+		if (rc)
+			pr_err("Unable to enable batt alarm\n");
 	}
 }
 
@@ -1911,23 +2063,6 @@
 		power_supply_changed(chip->batt_psy);
 }
 
-static int get_batt_temp(struct pm8921_bms_chip *chip, int *batt_temp)
-{
-	int rc;
-	struct pm8xxx_adc_chan_result result;
-
-	rc = pm8xxx_adc_read(chip->batt_temp_channel, &result);
-	if (rc) {
-		pr_err("error reading batt_temp_channel = %d, rc = %d\n",
-					chip->batt_temp_channel, rc);
-		return rc;
-	}
-	*batt_temp = result.physical;
-	pr_debug("batt_temp phy = %lld meas = 0x%llx\n", result.physical,
-						result.measurement);
-	return 0;
-}
-
 #define MIN_DELTA_625_UV	1000
 static void calib_hkadc(struct pm8921_bms_chip *chip)
 {
@@ -3148,6 +3283,9 @@
 	chip->ocv_dis_high_soc = pdata->ocv_dis_high_soc;
 	chip->ocv_dis_low_soc = pdata->ocv_dis_low_soc;
 
+	chip->alarm_low_mv = pdata->alarm_low_mv;
+	chip->alarm_high_mv = pdata->alarm_high_mv;
+
 	mutex_init(&chip->calib_mutex);
 	INIT_WORK(&chip->calib_hkadc_work, calibrate_hkadc_work);
 
@@ -3188,6 +3326,18 @@
 	pm8921_bms_enable_irq(chip, PM8921_BMS_GOOD_OCV);
 	pm8921_bms_enable_irq(chip, PM8921_BMS_OCV_FOR_R);
 
+	rc = pm8921_bms_configure_batt_alarm(chip);
+	if (rc) {
+		pr_err("Couldn't configure battery alarm! rc=%d\n", rc);
+		goto free_irqs;
+	}
+
+	rc = pm8921_bms_enable_batt_alarm(chip);
+	if (rc) {
+		pr_err("Couldn't enable battery alarm! rc=%d\n", rc);
+		goto free_irqs;
+	}
+
 	calculate_soc_work(&(chip->calculate_soc_delayed_work.work));
 
 	rc = get_battery_uvolts(chip, &vbatt);
diff --git a/drivers/usb/misc/ks_bridge.c b/drivers/usb/misc/ks_bridge.c
index 5a3dfa3..b0785f6 100644
--- a/drivers/usb/misc/ks_bridge.c
+++ b/drivers/usb/misc/ks_bridge.c
@@ -33,6 +33,24 @@
 #define DRIVER_DESC	"USB host ks bridge driver"
 #define DRIVER_VERSION	"1.0"
 
+enum bus_id {
+	BUS_HSIC,
+	BUS_USB,
+	BUS_UNDEF,
+};
+
+#define BUSNAME_LEN	20
+
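+/* Map a USB bus name string to a bridge bus id (HSIC vs. EHCI USB host) */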
+static enum bus_id str_to_busid(const char *name)
+{
+	if (!strncasecmp("msm_hsic_host", name, BUSNAME_LEN))
+		return BUS_HSIC;
+	if (!strncasecmp("msm_ehci_host.0", name, BUSNAME_LEN))
+		return BUS_USB;
+
+	return BUS_UNDEF;
+}
+
 struct data_pkt {
 	int			n_read;
 	char			*buf;
@@ -44,9 +62,9 @@
 #define FILE_OPENED		BIT(0)
 #define USB_DEV_CONNECTED	BIT(1)
 #define NO_RX_REQS		10
-#define NO_BRIDGE_INSTANCES	2
-#define BOOT_BRIDGE_INDEX	0
-#define EFS_BRIDGE_INDEX	1
+#define NO_BRIDGE_INSTANCES	4
+#define EFS_HSIC_BRIDGE_INDEX	2
+#define EFS_USB_BRIDGE_INDEX	3
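+/* instances 0/1 are the per-bus boot bridges (BUS_HSIC/BUS_USB) */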
 #define MAX_DATA_PKT_SIZE	16384
 #define PENDING_URB_TIMEOUT	10
 
@@ -60,7 +78,7 @@
 	struct list_head	to_ks_list;
 	wait_queue_head_t	ks_wait_q;
 	wait_queue_head_t	pending_urb_wait;
-	struct miscdevice	*fs_dev;
+	struct miscdevice	fs_dev;
 	atomic_t		tx_pending_cnt;
 	atomic_t		rx_pending_cnt;
 
@@ -148,7 +166,7 @@
 	int ret;
 	unsigned long flags;
 	struct ks_bridge *ksb = fp->private_data;
-	struct data_pkt *pkt;
+	struct data_pkt *pkt = NULL;
 	size_t space, copied;
 
 read_start:
@@ -169,43 +187,53 @@
 
 	space = count;
 	copied = 0;
-	while (!list_empty(&ksb->to_ks_list) && space) {
+	while (!list_empty(&ksb->to_ks_list) && space &&
+			test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
 		size_t len;
 
 		pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
+		list_del_init(&pkt->list);
 		len = min_t(size_t, space, pkt->len - pkt->n_read);
 		spin_unlock_irqrestore(&ksb->lock, flags);
 
 		ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
 		if (ret) {
-			pr_err("copy_to_user failed err:%d\n", ret);
+			dev_err(ksb->fs_dev.this_device,
+					"copy_to_user failed err:%d\n", ret);
 			ksb_free_data_pkt(pkt);
-			return ret;
+			return -EFAULT;
 		}
 
 		pkt->n_read += len;
 		space -= len;
 		copied += len;
 
-		spin_lock_irqsave(&ksb->lock, flags);
 		if (pkt->n_read == pkt->len) {
 			/*
 			 * re-init the packet and queue it
 			 * for more data.
 			 */
-			list_del_init(&pkt->list);
 			pkt->n_read = 0;
 			pkt->len = MAX_DATA_PKT_SIZE;
-			spin_unlock_irqrestore(&ksb->lock, flags);
 			submit_one_urb(ksb, GFP_KERNEL, pkt);
-			spin_lock_irqsave(&ksb->lock, flags);
+			pkt = NULL;
 		}
+		spin_lock_irqsave(&ksb->lock, flags);
+	}
+
+	/* put the partial packet back in the list */
+	if (!space && pkt && pkt->n_read != pkt->len) {
+		if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
+			list_add(&pkt->list, &ksb->to_ks_list);
+		else
+			ksb_free_data_pkt(pkt);
 	}
 	spin_unlock_irqrestore(&ksb->lock, flags);
 
 	dbg_log_event(ksb, "KS_READ", copied, 0);
 
-	pr_debug("count:%d space:%d copied:%d", count, space, copied);
+	dev_dbg(ksb->fs_dev.this_device, "count:%zu space:%zu copied:%zu",
+			count, space, copied);
 
 	return copied;
 }
@@ -216,13 +244,14 @@
 	struct ks_bridge *ksb = pkt->ctxt;
 
 	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
-	pr_debug("status:%d", urb->status);
+	dev_dbg(&ksb->udev->dev, "status:%d", urb->status);
 
 	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
 		usb_autopm_put_interface_async(ksb->ifc);
 
 	if (urb->status < 0)
-		pr_err_ratelimited("urb failed with err:%d", urb->status);
+		pr_err_ratelimited("%s: urb failed with err:%d",
+				ksb->fs_dev.name, urb->status);
 
 	ksb_free_data_pkt(pkt);
 
@@ -248,14 +277,16 @@
 
 		urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (!urb) {
-			pr_err_ratelimited("unable to allocate urb");
+			pr_err_ratelimited("%s: unable to allocate urb",
+					ksb->fs_dev.name);
 			ksb_free_data_pkt(pkt);
 			return;
 		}
 
 		ret = usb_autopm_get_interface(ksb->ifc);
 		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
-			pr_err_ratelimited("autopm_get failed:%d", ret);
+			pr_err_ratelimited("%s: autopm_get failed:%d",
+					ksb->fs_dev.name, ret);
 			usb_free_urb(urb);
 			ksb_free_data_pkt(pkt);
 			return;
@@ -269,7 +300,7 @@
 		atomic_inc(&ksb->tx_pending_cnt);
 		ret = usb_submit_urb(urb, GFP_KERNEL);
 		if (ret) {
-			pr_err("out urb submission failed");
+			dev_err(&ksb->udev->dev, "out urb submission failed");
 			usb_unanchor_urb(urb);
 			usb_free_urb(urb);
 			ksb_free_data_pkt(pkt);
@@ -302,13 +333,15 @@
 
 	pkt = ksb_alloc_data_pkt(count, GFP_KERNEL, ksb);
 	if (IS_ERR(pkt)) {
-		pr_err("unable to allocate data packet");
+		dev_err(ksb->fs_dev.this_device,
+				"unable to allocate data packet");
 		return PTR_ERR(pkt);
 	}
 
 	ret = copy_from_user(pkt->buf, buf, count);
 	if (ret) {
-		pr_err("copy_from_user failed: err:%d", ret);
+		dev_err(ksb->fs_dev.this_device,
+				"copy_from_user failed: err:%d", ret);
 		ksb_free_data_pkt(pkt);
 		return ret;
 	}
@@ -322,39 +355,19 @@
 	return count;
 }
 
-static int efs_fs_open(struct inode *ip, struct file *fp)
-{
-	struct ks_bridge *ksb = __ksb[EFS_BRIDGE_INDEX];
-
-	pr_debug(":%s", ksb->name);
-	dbg_log_event(ksb, "EFS-FS-OPEN", 0, 0);
-
-	if (!ksb) {
-		pr_err("ksb is being removed");
-		return -ENODEV;
-	}
-
-	fp->private_data = ksb;
-	set_bit(FILE_OPENED, &ksb->flags);
-
-	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
-		queue_work(ksb->wq, &ksb->start_rx_work);
-
-	return 0;
-}
-
 static int ksb_fs_open(struct inode *ip, struct file *fp)
 {
-	struct ks_bridge *ksb = __ksb[BOOT_BRIDGE_INDEX];
+	struct miscdevice *mdev = fp->private_data;
+	struct ks_bridge *ksb = container_of(mdev, struct ks_bridge, fs_dev);
 
-	pr_debug(":%s", ksb->name);
-	dbg_log_event(ksb, "KS-FS-OPEN", 0, 0);
-
-	if (!ksb) {
-		pr_err("ksb is being removed");
+	if (IS_ERR(ksb)) {
+		pr_err("ksb device not found");
 		return -ENODEV;
 	}
 
+	dev_dbg(ksb->fs_dev.this_device, ":%s", ksb->fs_dev.name);
+	dbg_log_event(ksb, "FS-OPEN", 0, 0);
+
 	fp->private_data = ksb;
 	set_bit(FILE_OPENED, &ksb->flags);
 
@@ -368,7 +381,7 @@
 {
 	struct ks_bridge	*ksb = fp->private_data;
 
-	pr_debug(":%s", ksb->name);
+	dev_dbg(ksb->fs_dev.this_device, ":%s", ksb->fs_dev.name);
 	dbg_log_event(ksb, "FS-RELEASE", 0, 0);
 
 	clear_bit(FILE_OPENED, &ksb->flags);
@@ -385,35 +398,49 @@
 	.release = ksb_fs_release,
 };
 
-static struct miscdevice ksb_fboot_dev = {
-	.minor = MISC_DYNAMIC_MINOR,
-	.name = "ks_bridge",
-	.fops = &ksb_fops,
+static struct miscdevice ksb_fboot_dev[] = {
+	{
+		.minor = MISC_DYNAMIC_MINOR,
+		.name = "ks_hsic_bridge",
+		.fops = &ksb_fops,
+	},
+	{
+		.minor = MISC_DYNAMIC_MINOR,
+		.name = "ks_usb_bridge",
+		.fops = &ksb_fops,
+	},
 };
 
 static const struct file_operations efs_fops = {
 	.owner = THIS_MODULE,
 	.read = ksb_fs_read,
 	.write = ksb_fs_write,
-	.open = efs_fs_open,
+	.open = ksb_fs_open,
 	.release = ksb_fs_release,
 };
 
-static struct miscdevice ksb_efs_dev = {
+static struct miscdevice ksb_efs_hsic_dev = {
 	.minor = MISC_DYNAMIC_MINOR,
-	.name = "efs_bridge",
+	.name = "efs_hsic_bridge",
 	.fops = &efs_fops,
 };
 
+static struct miscdevice ksb_efs_usb_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "efs_usb_bridge",
+	.fops = &efs_fops,
+};
+
 static const struct usb_device_id ksb_usb_ids[] = {
 	{ USB_DEVICE(0x5c6, 0x9008),
 	.driver_info = (unsigned long)&ksb_fboot_dev, },
 	{ USB_DEVICE(0x5c6, 0x9048),
-	.driver_info = (unsigned long)&ksb_efs_dev, },
+	.driver_info = (unsigned long)&ksb_efs_hsic_dev, },
 	{ USB_DEVICE(0x5c6, 0x904C),
-	.driver_info = (unsigned long)&ksb_efs_dev, },
+	.driver_info = (unsigned long)&ksb_efs_hsic_dev, },
 	{ USB_DEVICE(0x5c6, 0x9075),
-	.driver_info = (unsigned long)&ksb_efs_dev, },
+	.driver_info = (unsigned long)&ksb_efs_hsic_dev, },
+	{ USB_DEVICE(0x5c6, 0x9079),
+	.driver_info = (unsigned long)&ksb_efs_usb_dev, },
 
 	{} /* terminating entry */
 };
@@ -428,7 +455,7 @@
 
 	urb = usb_alloc_urb(0, flags);
 	if (!urb) {
-		pr_err("unable to allocate urb");
+		dev_err(&ksb->udev->dev, "unable to allocate urb");
 		ksb_free_data_pkt(pkt);
 		return;
 	}
@@ -448,7 +475,7 @@
 	atomic_inc(&ksb->rx_pending_cnt);
 	ret = usb_submit_urb(urb, flags);
 	if (ret) {
-		pr_err("in urb submission failed");
+		dev_err(&ksb->udev->dev, "in urb submission failed");
 		usb_unanchor_urb(urb);
 		usb_free_urb(urb);
 		ksb_free_data_pkt(pkt);
@@ -465,20 +492,29 @@
 {
 	struct data_pkt *pkt = urb->context;
 	struct ks_bridge *ksb = pkt->ctxt;
+	bool wakeup = true;
 
 	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
 
-	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);
+	dev_dbg(&ksb->udev->dev, "status:%d actual:%d", urb->status,
+			urb->actual_length);
 
 	/*non zero len of data received while unlinking urb*/
-	if (urb->status == -ENOENT && urb->actual_length > 0)
+	if (urb->status == -ENOENT && (urb->actual_length > 0)) {
+		/*
+		 * If we wakeup the reader process now, it may
+		 * queue the URB before its reject flag gets
+		 * cleared.
+		 */
+		wakeup = false;
 		goto add_to_list;
+	}
 
 	if (urb->status < 0) {
 		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT
 				&& urb->status != -EPROTO)
-			pr_err_ratelimited("urb failed with err:%d",
-					urb->status);
+			pr_err_ratelimited("%s: urb failed with err:%d",
+					ksb->fs_dev.name, urb->status);
 		ksb_free_data_pkt(pkt);
 		goto done;
 	}
@@ -493,10 +529,9 @@
 	pkt->len = urb->actual_length;
 	list_add_tail(&pkt->list, &ksb->to_ks_list);
 	spin_unlock(&ksb->lock);
-
 	/* wake up read thread */
-	wake_up(&ksb->ks_wait_q);
-
+	if (wakeup)
+		wake_up(&ksb->ks_wait_q);
 done:
 	atomic_dec(&ksb->rx_pending_cnt);
 	wake_up(&ksb->pending_urb_wait);
@@ -515,7 +550,8 @@
 	ret = usb_autopm_get_interface(ksb->ifc);
 	if (ret < 0) {
 		if (ret != -EAGAIN && ret != -EACCES) {
-			pr_err_ratelimited("autopm_get failed:%d", ret);
+			pr_err_ratelimited("%s: autopm_get failed:%d",
+					ksb->fs_dev.name, ret);
 			return;
 		}
 		put = false;
@@ -527,13 +563,13 @@
 
 		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
 		if (IS_ERR(pkt)) {
-			pr_err("unable to allocate data pkt");
+			dev_err(&ksb->udev->dev, "unable to allocate data pkt");
 			break;
 		}
 
 		urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (!urb) {
-			pr_err("unable to allocate urb");
+			dev_err(&ksb->udev->dev, "unable to allocate urb");
 			ksb_free_data_pkt(pkt);
 			break;
 		}
@@ -548,7 +584,7 @@
 		atomic_inc(&ksb->rx_pending_cnt);
 		ret = usb_submit_urb(urb, GFP_KERNEL);
 		if (ret) {
-			pr_err("in urb submission failed");
+			dev_err(&ksb->udev->dev, "in urb submission failed");
 			usb_unanchor_urb(urb);
 			usb_free_urb(urb);
 			ksb_free_data_pkt(pkt);
@@ -573,21 +609,40 @@
 	struct ks_bridge		*ksb;
 	unsigned long			flags;
 	struct data_pkt			*pkt;
+	struct miscdevice		*mdev, *fbdev;
+	struct usb_device		*udev;
+	unsigned int			bus_id;
 
 	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;
 
+	udev = interface_to_usbdev(ifc);
+	fbdev = mdev = (struct miscdevice *)id->driver_info;
+
+	bus_id = str_to_busid(udev->bus->bus_name);
+	if (bus_id == BUS_UNDEF) {
+		dev_err(&udev->dev, "unknown usb bus %s, probe failed\n",
+				udev->bus->bus_name);
+		return -ENODEV;
+	}
+
 	switch (id->idProduct) {
 	case 0x9008:
 		if (ifc_num != 0)
 			return -ENODEV;
-		ksb = __ksb[BOOT_BRIDGE_INDEX];
+		ksb = __ksb[bus_id];
+		mdev = &fbdev[bus_id];
 		break;
 	case 0x9048:
 	case 0x904C:
 	case 0x9075:
 		if (ifc_num != 2)
 			return -ENODEV;
-		ksb = __ksb[EFS_BRIDGE_INDEX];
+		ksb = __ksb[EFS_HSIC_BRIDGE_INDEX];
+		break;
+	case 0x9079:
+		if (ifc_num != 2)
+			return -ENODEV;
+		ksb = __ksb[EFS_USB_BRIDGE_INDEX];
 		break;
 	default:
 		return -ENODEV;
@@ -613,7 +668,8 @@
 	}
 
 	if (!(ksb->in_epAddr && ksb->out_epAddr)) {
-		pr_err("could not find bulk in and bulk out endpoints");
+		dev_err(&udev->dev,
+			"could not find bulk in and bulk out endpoints");
 		usb_put_dev(ksb->udev);
 		ksb->ifc = NULL;
 		return -ENODEV;
@@ -645,13 +701,15 @@
 	}
 	spin_unlock_irqrestore(&ksb->lock, flags);
 
-	ksb->fs_dev = (struct miscdevice *)id->driver_info;
-	misc_register(ksb->fs_dev);
+	ksb->fs_dev = *mdev;
+	misc_register(&ksb->fs_dev);
 
-	ifc->needs_remote_wakeup = 1;
-	usb_enable_autosuspend(ksb->udev);
+	if (device_can_wakeup(&ksb->udev->dev)) {
+		ifc->needs_remote_wakeup = 1;
+		usb_enable_autosuspend(ksb->udev);
+	}
 
-	pr_debug("usb dev connected");
+	dev_dbg(&udev->dev, "usb dev connected");
 
 	return 0;
 }
@@ -659,11 +717,26 @@
 static int ksb_usb_suspend(struct usb_interface *ifc, pm_message_t message)
 {
 	struct ks_bridge *ksb = usb_get_intfdata(ifc);
+	unsigned long flags;
 
 	dbg_log_event(ksb, "SUSPEND", 0, 0);
 
 	usb_kill_anchored_urbs(&ksb->submitted);
 
+	spin_lock_irqsave(&ksb->lock, flags);
+	if (!list_empty(&ksb->to_ks_list)) {
+		spin_unlock_irqrestore(&ksb->lock, flags);
+		dbg_log_event(ksb, "SUSPEND ABORT", 0, 0);
+		/*
+		 * Now wakeup the reader process and queue
+		 * Rx URBs for more data.
+		 */
+		wake_up(&ksb->ks_wait_q);
+		queue_work(ksb->wq, &ksb->start_rx_work);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&ksb->lock, flags);
+
 	return 0;
 }
 
@@ -692,7 +765,7 @@
 	cancel_work_sync(&ksb->to_mdm_work);
 	cancel_work_sync(&ksb->start_rx_work);
 
-	misc_deregister(ksb->fs_dev);
+	misc_deregister(&ksb->fs_dev);
 
 	usb_kill_anchored_urbs(&ksb->submitted);
 
diff --git a/include/linux/mfd/pm8xxx/pm8921-bms.h b/include/linux/mfd/pm8xxx/pm8921-bms.h
index 28b210b..91b769d 100644
--- a/include/linux/mfd/pm8xxx/pm8921-bms.h
+++ b/include/linux/mfd/pm8xxx/pm8921-bms.h
@@ -53,6 +53,8 @@
 	unsigned int			v_cutoff;
 	unsigned int			max_voltage_uv;
 	unsigned int			rconn_mohm;
+	unsigned int			alarm_low_mv;
+	unsigned int			alarm_high_mv;
 	int				enable_fcc_learning;
 	int				shutdown_soc_valid_limit;
 	int				ignore_shutdown_soc;
diff --git a/include/linux/netfilter_ipv4/ipt_NATTYPE.h b/include/linux/netfilter_ipv4/ipt_NATTYPE.h
index b612290..88311c9 100644
--- a/include/linux/netfilter_ipv4/ipt_NATTYPE.h
+++ b/include/linux/netfilter_ipv4/ipt_NATTYPE.h
@@ -21,5 +21,7 @@
 	u_int16_t type;
 };
 
+extern bool nattype_refresh_timer(unsigned long nattype);
+
 #endif /*_IPT_NATTYPE_H_target*/
 
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index ab86036..eeb5258 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -100,6 +100,11 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
+/* Handle NATTYPE stuff only if the NATTYPE module is enabled */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+#include <linux/netfilter_ipv4/ipt_NATTYPE.h>
+#endif
+
 struct nf_conn {
 	/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
            plus 1 for any connection(s) we are `master' for */
@@ -134,6 +139,10 @@
 	struct net *ct_net;
 #endif
 
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
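+	/* Opaque NATTYPE entry handle, refreshed along with the ct timeout */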
+	unsigned long nattype_entry;
+#endif
+
 	/* Storage reserved for other modules, must be the last member */
 	union nf_conntrack_proto proto;
 };
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 5f889f1..a1da44f 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -302,6 +302,199 @@
 		__entry->alloc_migratetype == __entry->fallback_migratetype)
 );
 
+
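+/*
+ * ION allocation tracepoints: record the client and heap names, requested
+ * length, heap mask and flags of a buffer allocation (the _error class
+ * also carries the error code for the fallback/failure paths).
+ */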
+DECLARE_EVENT_CLASS(ion_alloc,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags),
+
+	TP_STRUCT__entry(
+		__field(const char *,	client_name)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+	),
+
+	TP_fast_assign(
+		__entry->client_name	= client_name;
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+	),
+
+	TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_error,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error),
+
+	TP_STRUCT__entry(
+		__field(const char *,	client_name)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+		__field(long,		error)
+	),
+
+	TP_fast_assign(
+		__entry->client_name	= client_name;
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+		__entry->error		= error;
+	),
+
+	TP_printk(
+	"client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags,
+		__entry->error)
+);
+
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+
+DECLARE_EVENT_CLASS(alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries),
+
+	TP_STRUCT__entry(
+		__field(int, tries)
+	),
+
+	TP_fast_assign(
+		__entry->tries = tries;
+	),
+
+	TP_printk("tries=%d",
+		__entry->tries)
+);
+
+DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, migrate_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DECLARE_EVENT_CLASS(migrate_pages,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode),
+
+	TP_STRUCT__entry(
+		__field(int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->mode = mode;
+	),
+
+	TP_printk("mode=%d",
+		__entry->mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_start,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_end,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode)
+);
+
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
diff --git a/mm/compaction.c b/mm/compaction.c
index da7d35e..353f1c5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -750,6 +750,7 @@
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
+	int alloc_flags = 0;
 
 	/*
 	 * Check whether it is worth even starting compaction. The order check is
@@ -761,6 +762,10 @@
 
 	count_vm_event(COMPACTSTALL);
 
+#ifdef CONFIG_CMA
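+	/*
+	 * Movable allocations may also use CMA pageblocks, so pass ALLOC_CMA
+	 * to the watermark check below to match the page allocator's view.
+	 */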
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* Compact each zone in the list */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 								nodemask) {
@@ -770,7 +775,8 @@
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
-		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
+		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
+				      alloc_flags))
 			break;
 	}
 
diff --git a/mm/internal.h b/mm/internal.h
index aee4761..8c6fd44 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -342,3 +342,17 @@
 extern u64 hwpoison_filter_flags_value;
 extern u64 hwpoison_filter_memcg;
 extern u32 hwpoison_filter_enable;
+
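+/*
+ * Allocation flags shared between mm/page_alloc.c and mm/compaction.c
+ * (compaction needs ALLOC_CMA for its watermark checks).
+ */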
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN		WMARK_MIN
+#define ALLOC_WMARK_LOW		WMARK_LOW
+#define ALLOC_WMARK_HIGH	WMARK_HIGH
+#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
+
+#define ALLOC_HARDER		0x10 /* try to alloc harder */
+#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
diff --git a/mm/migrate.c b/mm/migrate.c
index 1107238..79a791f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
 #include <linux/gfp.h>
+#include <trace/events/kmem.h>
 
 #include <asm/tlbflush.h>
 
@@ -974,6 +975,7 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_migrate_pages_start(mode);
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
@@ -992,6 +994,7 @@
 				goto out;
 			case -EAGAIN:
 				retry++;
+				trace_migrate_retry(retry);
 				break;
 			case 0:
 				break;
@@ -1007,6 +1010,7 @@
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
 
+	trace_migrate_pages_end(mode);
 	if (rc)
 		return rc;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73ac1b0..6e42cc2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1524,19 +1524,6 @@
 	return NULL;
 }
 
-/* The ALLOC_WMARK bits are used as an index to zone->watermark */
-#define ALLOC_WMARK_MIN		WMARK_MIN
-#define ALLOC_WMARK_LOW		WMARK_LOW
-#define ALLOC_WMARK_HIGH	WMARK_HIGH
-#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */
-
-/* Mask to get the watermark bits */
-#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
-
-#define ALLOC_HARDER		0x10 /* try to alloc harder */
-#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
-#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
-
 #ifdef CONFIG_FAIL_PAGE_ALLOC
 
 static struct {
@@ -1631,7 +1618,11 @@
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
-
+#ifdef CONFIG_CMA
+	/* If allocation can't use CMA areas don't use free CMA pages */
+	if (!(alloc_flags & ALLOC_CMA))
+		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+#endif
 	if (free_pages <= min + lowmem_reserve)
 		return false;
 	for (o = 0; o < order; o++) {
@@ -2300,7 +2291,10 @@
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
-
+#ifdef CONFIG_CMA
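+	/* movable allocations are also allowed to use CMA pageblocks */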
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	return alloc_flags;
 }
 
@@ -2509,6 +2503,7 @@
 	struct page *page = NULL;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 	unsigned int cpuset_mems_cookie;
+	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
 
 	gfp_mask &= gfp_allowed_mask;
 
@@ -2537,9 +2532,13 @@
 	if (!preferred_zone)
 		goto out;
 
+#ifdef CONFIG_CMA
+	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+		alloc_flags |= ALLOC_CMA;
+#endif
 	/* First allocation attempt */
 	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-			zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+			zonelist, high_zoneidx, alloc_flags,
 			preferred_zone, migratetype);
 	if (unlikely(!page))
 		page = __alloc_pages_slowpath(gfp_mask, order,
diff --git a/net/ipv4/netfilter/ipt_NATTYPE.c b/net/ipv4/netfilter/ipt_NATTYPE.c
index 6b28794..2bb18ca 100644
--- a/net/ipv4/netfilter/ipt_NATTYPE.c
+++ b/net/ipv4/netfilter/ipt_NATTYPE.c
@@ -58,6 +58,7 @@
 struct ipt_nattype {
 	struct list_head list;
 	struct timer_list timeout;
+	/* set while the entry is live on nattype_list; cleared on timeout */
+	unsigned char is_valid;
 	unsigned short proto;		/* Protocol: TCP or UDP */
 	struct nf_nat_ipv4_range range;	/* LAN side source information */
 	unsigned short nat_port;	/* Routed NAT port */
@@ -101,14 +102,23 @@
  * nattype_refresh_timer()
  *	Refresh the timer for this object.
  */
-static bool nattype_refresh_timer(struct ipt_nattype *nte)
+bool nattype_refresh_timer(unsigned long nat_type)
 {
-
+	struct ipt_nattype *nte = (struct ipt_nattype *)nat_type;
+	if (!nte)
+		return false;
+	spin_lock_bh(&nattype_lock);
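+	/*
+	 * The timeout handler clears the entry under nattype_lock before
+	 * freeing it, so is_valid must be checked with the lock held.
+	 */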
+	if (!nte->is_valid) {
+		spin_unlock_bh(&nattype_lock);
+		return false;
+	}
 	if (del_timer(&nte->timeout)) {
 		nte->timeout.expires = jiffies + NATTYPE_TIMEOUT * HZ;
 		add_timer(&nte->timeout);
+		spin_unlock_bh(&nattype_lock);
 		return true;
 	}
+	spin_unlock_bh(&nattype_lock);
 	return false;
 }
 
@@ -128,6 +138,7 @@
 	nattype_nte_debug_print(nte, "timeout");
 	spin_lock_bh(&nattype_lock);
 	list_del(&nte->list);
+	memset(nte, 0, sizeof(struct ipt_nattype));
 	spin_unlock_bh(&nattype_lock);
 	nattype_free(nte);
 }
@@ -309,6 +320,7 @@
 		 */
 		DEBUGP("Expand ingress conntrack=%p, type=%d, src[%pI4:%d]\n",
 			ct, ctinfo, &newrange.min_ip, ntohs(newrange.min.all));
+		ct->nattype_entry = (unsigned long)nte;
 		ret = nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
 		DEBUGP("Expand returned: %d\n", ret);
 		return ret;
@@ -348,21 +360,19 @@
 			 */
 			if (!nattype_packet_in_match(nte, skb, info))
 				continue;
-
+			spin_unlock_bh(&nattype_lock);
 			/*
 			 * Refresh the timer, if we fail, break
 			 * out and forward fail as though we never
 			 * found the entry.
 			 */
-			if (!nattype_refresh_timer(nte))
+			if (!nattype_refresh_timer((unsigned long)nte))
 				break;
-
 			/*
 			 * The entry is found and refreshed, the
 			 * entry values should not change so print
 			 * them outside the lock.
 			 */
-			spin_unlock_bh(&nattype_lock);
 			nattype_nte_debug_print(nte, "refresh");
 			DEBUGP("FORWARD_IN_ACCEPT\n");
 			return NF_ACCEPT;
@@ -431,22 +441,20 @@
 	list_for_each_entry(nte2, &nattype_list, list) {
 		if (!nattype_compare(nte, nte2))
 			continue;
-
+		spin_unlock_bh(&nattype_lock);
 		/*
 		 * If we can not refresh this entry, insert our new
 		 * entry as this one is timed out and will be removed
 		 * from the list shortly.
 		 */
-		if (!nattype_refresh_timer(nte2))
+		if (!nattype_refresh_timer((unsigned long)nte2))
 			break;
-
 		/*
 		 * Found and refreshed an existing entry.  Its values
 		 * do not change so print the values outside of the lock.
 		 *
 		 * Free up the new entry.
 		 */
-		spin_unlock_bh(&nattype_lock);
 		nattype_nte_debug_print(nte2, "refresh");
 		nattype_free(nte);
 		return XT_CONTINUE;
@@ -458,6 +466,8 @@
 	nte->timeout.expires = jiffies + (NATTYPE_TIMEOUT  * HZ);
 	add_timer(&nte->timeout);
 	list_add(&nte->list, &nattype_list);
+	ct->nattype_entry = (unsigned long)nte;
+	nte->is_valid = 1;
 	spin_unlock_bh(&nattype_lock);
 	nattype_nte_debug_print(nte, "ADD");
 	return XT_CONTINUE;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 729f157..13925ac 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -833,6 +833,10 @@
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
 		ct->secmark = exp->master->secmark;
 #endif
+/* Initialize the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+		ct->nattype_entry = 0;
+#endif
 		nf_conntrack_get(&ct->master->ct_general);
 		NF_CT_STAT_INC(net, expect_new);
 	} else {
@@ -1095,6 +1099,11 @@
 			mod_timer_pending(&ct->timeout, newtime);
 	}
 
+/* Refresh the NAT type entry. */
+#if defined(CONFIG_IP_NF_TARGET_NATTYPE_MODULE)
+	(void)nattype_refresh_timer(ct->nattype_entry);
+#endif
+
 acct:
 	if (do_acct) {
 		struct nf_conn_counter *acct;