Merge "msm: ADSPRPC: Fix for null-ptr-dereference"
diff --git a/Documentation/devicetree/bindings/batterydata/batterydata.txt b/Documentation/devicetree/bindings/batterydata/batterydata.txt
new file mode 100644
index 0000000..39f9375
--- /dev/null
+++ b/Documentation/devicetree/bindings/batterydata/batterydata.txt
@@ -0,0 +1,221 @@
+Battery Profile Data
+
+Battery Data is a collection of battery profile data made available to
+the QPNP Charger and BMS drivers via device tree.
+
+qcom,battery-data node required properties:
+- qcom,rpull-up-kohm : The VADC pull-up resistor's resistance value in kOhms.
+- qcom,vref-batt-therm-uv : The VADC reference voltage used to make readings.
+			For Qualcomm Technologies, Inc. VADCs, this should be
+			1800000uV.
+
+qcom,battery-data node optional properties:
+- qcom,batt-id-range-pct : The allowed variation between the upper and lower
+			bound for which a given battery ID resistance is still
+			considered valid. This value is expressed as a percentage
+			of the resistance in kOhm provided by qcom,batt-id-kohm.
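+			For example (illustrative values), with
+			qcom,batt-id-kohm = <75> and qcom,batt-id-range-pct =
+			<15>, ID resistances between roughly 64 kOhm and
+			86 kOhm (75 kOhm +/- 15%) would match the profile.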
+
+qcom,battery-data can also include any number of child nodes. These child
+nodes will be treated as battery profile data nodes.
+
+Profile data node required properties:
+- qcom,fcc-mah : Full charge capacity of the battery in milliamp-hours
+- qcom,default-rbatt-mohm : The nominal battery resistance value
+- qcom,rbatt-capacitive-mohm : The capacitive resistance of the battery.
+- qcom,flat-ocv-threshold-uv : The threshold under which the battery can be
+			considered to be in the flat portion of the discharge
+			curve.
+- qcom,max-voltage-uv : The maximum rated voltage of the battery
+- qcom,v-cutoff-uv : The cutoff voltage of the battery at which the device
+			should shut down gracefully.
+- qcom,chg-term-ua : The termination charging current of the battery.
+- qcom,batt-id-kohm : The battery ID resistance of the battery. It can be
+			specified as an array to support multiple IDs for one
+			battery module when the module's ID resistance spans
+			several ranges (see the example after this list).
+- qcom,battery-type : A string indicating the type of battery.
+- qcom,fg-profile-data : An array of hexadecimal values used to configure more
+			complex fuel gauge peripherals which have a large number
+			of coefficients used in hardware state machines, and
+			which thus influence the final state of charge read by
+			software.
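+			Example of an array-valued qcom,batt-id-kohm
+			(illustrative values) for a module whose ID resistance
+			may fall into either of two ranges:
+			qcom,batt-id-kohm = <75 105>;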
+
+Profile data node optional properties:
+- qcom,chg-rslow-comp-c1 : A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-c2 : A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,chg-rslow-comp-thr : A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,chg-rs-to-rslow: A constant for rslow compensation in the fuel gauge.
+			This will be provided by the profiling tool for
+			additional fuel gauge accuracy during charging.
+- qcom,fastchg-current-ma: Specifies the maximum fastcharge current.
+- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for the transition from
+			constant current (CC) to constant voltage (CV) charging.
+			This value should be 10 mV less than the float voltage
+			(see the example after this list). This property should
+			only be specified if the "qcom,autoadjust-vfloat"
+			property is specified in the charger driver to ensure
+			proper operation.
+- qcom,thermal-coefficients: Byte array of thermal coefficients for reading
+			battery thermistor. This should be exactly 6 bytes
+			in length.
+			Example: [01 02 03 04 05 06]
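+			As an illustration of qcom,fg-cc-cv-threshold-mv
+			(see above): assuming a float voltage of 4200 mV, the
+			property would be set to <4190>, i.e. 10 mV less.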
+
+Profile data node required subnodes:
+- qcom,fcc-temp-lut : A 1-dimensional lookup table node that encodes a
+			temperature to FCC lookup. The units for this lookup
+			table should be degrees Celsius to milliamp-hours.
+- qcom,pc-temp-ocv-lut : A 2-dimensional lookup table node that encodes a
+			temperature and percent charge to open circuit voltage
+			lookup. The units for this lookup table should be
+			degrees Celsius and percent to millivolts.
+- qcom,rbatt-sf-lut : A 2-dimensional lookup table node that encodes a
+			temperature and percent charge to battery internal
+			resistance lookup. The units for this lookup table
+			should be degrees Celsius and percent to milliohms.
+
+Profile data node optional subnodes:
+- qcom,ibat-acc-lut : A 2-dimensional lookup table that encodes temperature
+			and battery current to battery ACC (apparent charge
+			capacity). The units for this lookup table should be
+			temperature in degrees Celsius, ibat in milliamps
+			and ACC in milliamp-hours.
+
+Lookup table required properties:
+- qcom,lut-col-legend : An array that encodes the legend of the lookup table's
+			columns. The length of this array will determine the
+			lookup table's width.
+- qcom,lut-data : An array that encodes the lookup table's data. The size of this
+			array should be equal to the size of qcom,lut-col-legend
+			multiplied by 1 if it is a 1-dimensional table, or by
+			the size of qcom,lut-row-legend if it is a 2-dimensional
+			table. The data should be in a flattened row-major
+			representation.
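+			For example, the qcom,pc-temp-ocv-lut in the palladium
+			profile below has a 5-entry qcom,lut-col-legend and a
+			29-entry qcom,lut-row-legend, so its qcom,lut-data
+			carries 5 * 29 = 145 values.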
+
+Lookup table optional properties:
+- qcom,lut-row-legend : An array that encodes the legend of the lookup table's rows.
+			If this property exists, then it is assumed that the
+			lookup table is a 2-dimensional table.
+
+Example:
+
+In msm8974-mtp.dtsi:
+
+mtp_batterydata: qcom,battery-data {
+	qcom,rpull-up-kohm = <100>;
+	qcom,vref-batt-therm-uv = <1800000>;
+
+	/include/ "batterydata-palladium.dtsi"
+	/include/ "batterydata-mtp-3000mah.dtsi"
+};
+
+&pm8941_bms {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+In batterydata-palladium.dtsi:
+
+qcom,palladium-batterydata {
+	qcom,fcc-mah = <1500>;
+	qcom,default-rbatt-mohm = <236>;
+	qcom,rbatt-capacitive-mohm = <50>;
+	qcom,flat-ocv-threshold-uv = <3800000>;
+	qcom,max-voltage-uv = <4200000>;
+	qcom,v-cutoff-uv = <3400000>;
+	qcom,chg-term-ua = <100000>;
+	qcom,batt-id-kohm = <75>;
+	qcom,battery-type = "palladium_1500mah";
+
+	qcom,fcc-temp-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 65>;
+		qcom,lut-data = <1492 1492 1493 1483 1502>;
+	};
+
+	qcom,pc-temp-ocv-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 65>;
+		qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+				<65 60 55 50 45 40 35>,
+				<30 25 20 15 10 9 8>,
+				<7 6 5 4 3 2 1 0>;
+		qcom,lut-data = <4173 4167 4163 4156 4154>,
+			<4104 4107 4108 4102 4104>,
+			<4057 4072 4069 4061 4060>,
+			<3973 4009 4019 4016 4020>,
+			<3932 3959 3981 3982 3983>,
+			<3899 3928 3954 3950 3950>,
+			<3868 3895 3925 3921 3920>,
+			<3837 3866 3898 3894 3892>,
+			<3812 3841 3853 3856 3862>,
+			<3794 3818 3825 3823 3822>,
+			<3780 3799 3804 3804 3803>,
+			<3768 3787 3790 3788 3788>,
+			<3757 3779 3778 3775 3776>,
+			<3747 3772 3771 3766 3765>,
+			<3736 3763 3766 3760 3746>,
+			<3725 3749 3756 3747 3729>,
+			<3714 3718 3734 3724 3706>,
+			<3701 3703 3696 3689 3668>,
+			<3675 3695 3682 3675 3662>,
+			<3670 3691 3680 3673 3661>,
+			<3661 3686 3679 3672 3656>,
+			<3649 3680 3676 3669 3641>,
+			<3633 3669 3667 3655 3606>,
+			<3610 3647 3640 3620 3560>,
+			<3580 3607 3596 3572 3501>,
+			<3533 3548 3537 3512 3425>,
+			<3457 3468 3459 3429 3324>,
+			<3328 3348 3340 3297 3172>,
+			<3000 3000 3000 3000 3000>;
+	};
+
+	qcom,rbatt-sf-lut {
+		qcom,lut-col-legend = <(-20) 0 25 40 65>;
+		qcom,lut-row-legend = <100 95 90 85 80 75 70>,
+				<65 60 55 50 45 40 35>,
+				<30 25 20 15 10 9 8>,
+				<7 6 5 4 3 2 1 0>;
+		qcom,lut-data = <357 187 100 91 91>,
+			<400 208 105 94 94>,
+			<390 204 106 95 96>,
+			<391 201 108 98 98>,
+			<391 202 110 98 100>,
+			<390 200 110 99 102>,
+			<389 200 110 99 102>,
+			<393 202 101 93 100>,
+			<407 205 99 89 94>,
+			<428 208 100 91 96>,
+			<455 212 102 92 98>,
+			<495 220 104 93 101>,
+			<561 232 107 95 102>,
+			<634 245 112 98 98>,
+			<714 258 114 98 98>,
+			<791 266 114 97 100>,
+			<871 289 108 95 97>,
+			<973 340 124 108 105>,
+			<489 241 109 96 99>,
+			<511 246 110 96 99>,
+			<534 252 111 95 98>,
+			<579 263 112 96 96>,
+			<636 276 111 95 97>,
+			<730 294 109 96 99>,
+			<868 328 112 98 104>,
+			<1089 374 119 101 115>,
+			<1559 457 128 105 213>,
+			<12886 1026 637 422 3269>,
+			<170899 127211 98968 88907 77102>;
+	};
+
+	qcom,ibat-acc-lut {
+		qcom,lut-col-legend = <(-20) 0 25>;
+		qcom,lut-row-legend = <0 250 500 1000>;
+		qcom,lut-data = <1470 1470 1473>,
+				<1406 1406 1430>,
+				<1247 1247 1414>,
+				<764 764 1338>;
+	};
+};
+
diff --git a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
index 6f2fac7..3786412 100644
--- a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
+++ b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
@@ -4,7 +4,7 @@
 to measure the parameters for latency driven memory access patterns.
 
 Required properties:
-- compatible:			Must be "qcom,arm-memlat-mon"
+- compatible:			Must be "qcom,arm-memlat-mon" or "qcom,arm-cpu-mon"
 - qcom,cpulist:			List of CPU phandles to be monitored in a cluster
 - qcom,target-dev:		The DT device that corresponds to this master port
 - qcom,core-dev-table:		A mapping table of core frequency to a required bandwidth vote at the
diff --git a/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt b/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
index 5515457..f4f549a 100644
--- a/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mem-acc-regulator.txt
@@ -159,6 +159,21 @@
 				not specified, then "qcom,override-cornerX-reg-config" must contain a single
 				register configuration sequence list which is then applied unconditionally.
 				This property can only be specified if qcom,cornerX-reg-config property is already defined.
+- qcom,override-acc-range-fuse-list:	Array of tuples that define the selection parameters used for selecting the override
+				mem-acc configuration. The fused values of these selection parameters are used by the
+				qcom,override-fuse-range-map to identify the correct set of override properties.
+				Each tuple contains 4 elements as defined below:
+				  [0] => the fuse row number of the selector
+				  [1] => LSB position of the bit field
+				  [2] => number of bits
+				  [3] => fuse reading method, 0 for direct reading or 1 for SCM reading
+- qcom,override-fuse-range-map:	Array of tuples where each tuple specifies the allowed range for all the selection parameters
+				defined in qcom,override-acc-range-fuse-list. The fused values of these selection parameters
+				are compared against their allowed ranges in each tuple, starting from the 0th tuple, and the
+				index of the first matching tuple is used to select the corresponding tuples from the other
+				override properties. Either qcom,override-fuse-range-map or qcom,override-fuse-version-map is
+				used to select the override configuration. The qcom,override-fuse-range-map is used if both
+				properties are specified.
 
 mem_acc_vreg_corner: regulator@fd4aa044 {
 	compatible = "qcom,mem-acc-regulator";
@@ -184,6 +199,13 @@
 	qcom,override-fuse-version-map = <0>,
 					 <2>,
 					 <(-1)>;
+	qcom,override-acc-range-fuse-list =
+					<37 40 3 0>,
+					<36 30 8 0>;
+	qcom,override-fuse-range-map =
+				<0 0>, <  0   0>, <49 63>,
+				<1 1>, <  0   0>, <50 63>,
+				<0 1>, < 95 255>, < 0 63>;
 	qcom,override-corner-acc-map =	<0 0 1>,
 					<0 1 2>,
 					<0 1 1>;
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 6838afd..5d3b232 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -59,6 +59,8 @@
  - snps,xhci-imod-value: Interrupt moderation interval for host mode
 	(in increments of 250nsec).
  - usb-core-id: Differentiates between different controllers present on a device.
+ - snps,bus-suspend-enable: If present, the controller supports low power mode
+	during bus suspend.
 
 This is usually a subnode to DWC3 glue to which it is connected.
 
diff --git a/Makefile b/Makefile
index d4ee805..1e85d9b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 59
+SUBLEVEL = 60
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..6506f98
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-qrd.dtsi
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm660l_flash0 &pm660l_flash1>;
+		torch-source = <&pm660l_torch0 &pm660l_torch1>;
+		switch-source = <&pm660l_switch0>;
+		status = "ok";
+	};
+
+	actuator_regulator: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+		vin-supply = <&pm660l_bob>;
+	};
+
+	cam_avdd_gpio_regulator: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "cam_avdd_gpio_regulator";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&tlmm 100 0>;
+		vin-supply = <&pm660l_bob>;
+	};
+
+	cam_dvdd_gpio_regulator: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "cam_dvdd_gpio_regulator";
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 4 0>;
+		vin-supply = <&pm660_s6>;
+	};
+
+	cam_iovdd_gpio_regulator: gpio-regulator@3 {
+		compatible = "regulator-fixed";
+		reg = <0x03 0x00>;
+		regulator-name = "cam_iovdd_gpio_regulator";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 29 0>;
+		vin-supply = <&pm660_s4>;
+	};
+
+	cam_rear_avdd_gpio_regulator: gpio-regulator@4 {
+		compatible = "regulator-fixed";
+		reg = <0x04 0x00>;
+		regulator-name = "cam_rear_avdd_gpio_regulator";
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <2850000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&tlmm 8 0>;
+		vin-supply = <&pm660l_bob>;
+	};
+
+	cam_rear_dvdd_gpio_regulator: gpio-regulator@5 {
+		compatible = "regulator-fixed";
+		reg = <0x05 0x00>;
+		regulator-name = "cam_rear_dvdd_gpio_regulator";
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm660l_gpios 3 0>;
+		vin-supply = <&pm660_s6>;
+	};
+};
+
+&tlmm {
+	cam_sensor_rear_active: cam_sensor_rear_active {
+		/* RESET */
+		mux {
+			pins = "gpio30";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio30";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_rear_suspend: cam_sensor_rear_suspend {
+		/* RESET */
+		mux {
+			pins = "gpio30";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio30";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_rear2_active: cam_sensor_rear2_active {
+		/* RESET */
+		mux {
+			pins = "gpio9";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio9";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_rear2_suspend: cam_sensor_rear2_suspend {
+		/* RESET */
+		mux {
+			pins = "gpio9";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio9";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_front_active: cam_sensor_front_active {
+		/* RESET */
+		mux {
+			pins = "gpio28";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio28";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+
+	cam_sensor_front_suspend: cam_sensor_front_suspend {
+		/* RESET */
+		mux {
+			pins = "gpio28";
+			function = "gpio";
+		};
+
+		config {
+			pins = "gpio28";
+			bias-disable; /* No PULL */
+			drive-strength = <2>; /* 2 MA */
+		};
+	};
+};
+
+&cam_cci {
+	actuator_rear: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_rear_aux: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_front: qcom,actuator@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	eeprom_rear: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_front: qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_rear_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_rear_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 30 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear_aux>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				&cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				&cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		actuator-src = <&actuator_front>;
+		cam_vio-supply = <&cam_iovdd_gpio_regulator>;
+		cam_vana-supply = <&cam_avdd_gpio_regulator>;
+		cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 0 0 0>;
+		rgltr-max-voltage = <0 0 0 0>;
+		rgltr-load-current = <0 0 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
+
+&pm660l_gpios {
+	gpio@c300 { /* GPIO4 -CAMERA SENSOR 1/2 VDIG*/
+		qcom,mode = <1>;                /* Output */
+		qcom,pull = <5>;                /* No Pull */
+		qcom,vin-sel = <0>;             /* VIN1 GPIO_LV */
+		qcom,src-sel = <0>;             /* GPIO */
+		qcom,invert = <0>;              /* Invert */
+		qcom,master-en = <1>;           /* Enable GPIO */
+		status = "ok";
+	};
+
+	gpio@c200 { /* GPIO3 -CAMERA SENSOR 0 VDIG*/
+		qcom,mode = <1>;                /* Output */
+		qcom,pull = <5>;                /* No Pull */
+		qcom,vin-sel = <0>;             /* VIN1 GPIO_LV */
+		qcom,src-sel = <0>;             /* GPIO */
+		qcom,invert = <0>;              /* Invert */
+		qcom,master-en = <1>;           /* Enable GPIO */
+		status = "ok";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 610e1f6..4be4880 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -11,6 +11,7 @@
  */
 
 #include <dt-bindings/gpio/gpio.h>
+#include "sdm670-camera-sensor-qrd.dtsi"
 #include "sdm670-pmic-overlay.dtsi"
 #include "sdm670-audio-overlay.dtsi"
 #include "smb1355.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
new file mode 100644
index 0000000..8ad5f3c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	led_flash_rear: qcom,camera-flash@0 {
+		cell-index = <0>;
+		reg = <0x00 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
+		status = "ok";
+	};
+
+	led_flash_front: qcom,camera-flash@1 {
+		cell-index = <1>;
+		reg = <0x01 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash2>;
+		torch-source = <&pmi8998_torch2>;
+		switch-source = <&pmi8998_switch1>;
+		status = "ok";
+	};
+
+	actuator_regulator: gpio-regulator@0 {
+		compatible = "regulator-fixed";
+		reg = <0x00 0x00>;
+		regulator-name = "actuator_regulator";
+		regulator-min-microvolt = <2800000>;
+		regulator-max-microvolt = <2800000>;
+		regulator-enable-ramp-delay = <100>;
+		enable-active-high;
+		gpio = <&tlmm 27 0>;
+		vin-supply = <&pmi8998_bob>;
+	};
+
+	camera_rear_ldo: gpio-regulator@1 {
+		compatible = "regulator-fixed";
+		reg = <0x01 0x00>;
+		regulator-name = "camera_rear_ldo";
+		regulator-min-microvolt = <1050000>;
+		regulator-max-microvolt = <1050000>;
+		regulator-enable-ramp-delay = <135>;
+		enable-active-high;
+		gpio = <&pm8998_gpios 12 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_rear_dvdd_en_default>;
+		vin-supply = <&pm8998_s3>;
+	};
+
+	camera_ldo: gpio-regulator@2 {
+		compatible = "regulator-fixed";
+		reg = <0x02 0x00>;
+		regulator-name = "camera_ldo";
+		regulator-min-microvolt = <1050000>;
+		regulator-max-microvolt = <1050000>;
+		regulator-enable-ramp-delay = <233>;
+		enable-active-high;
+		gpio = <&pm8998_gpios 9 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&camera_dvdd_en_default>;
+		vin-supply = <&pm8998_s3>;
+	};
+};
+
+&cam_cci {
+	actuator_rear: qcom,actuator@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,actuator";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	actuator_rear_aux: qcom,actuator@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,actuator";
+		cci-master = <1>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+	};
+
+	ois_rear: qcom,ois@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,ois";
+		cci-master = <0>;
+		cam_vaf-supply = <&actuator_regulator>;
+		regulator-names = "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <2800000>;
+		rgltr-max-voltage = <2800000>;
+		rgltr-load-current = <0>;
+		status = "disabled";
+	};
+
+	eeprom_rear: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 80 0>,
+			<&tlmm 79 0>,
+			<&tlmm 27 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0",
+					"CAM_VANA0",
+					"CAM_VAF";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_rear_aux: qcom,eeprom@1 {
+		cell-index = <1>;
+		reg = <0x1>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>,
+			<&tlmm 8 0>,
+			<&tlmm 27 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-vaf = <3>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 0 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1",
+					"CAM_VANA1",
+					"CAM_VAF";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	eeprom_front: qcom,eeprom@2 {
+		cell-index = <2>;
+		reg = <0x2>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&camera_ldo>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2",
+					"CAM_VANA2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@0 {
+		cell-index = <0>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x0>;
+		csiphy-sd-index = <0>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		led-flash-src = <&led_flash_rear>;
+		actuator-src = <&actuator_rear>;
+		ois-src = <&ois_rear>;
+		eeprom-src = <&eeprom_rear>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_rear_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 80 0>,
+			<&tlmm 79 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET0",
+					"CAM_VANA";
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		actuator-src = <&actuator_rear_aux>;
+		led-flash-src = <&led_flash_front>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk1_active
+				 &cam_sensor_front_active>;
+		pinctrl-1 = <&cam_sensor_mclk1_suspend
+				 &cam_sensor_front_suspend>;
+		gpios = <&tlmm 14 0>,
+			<&tlmm 28 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK1",
+					"CAM_RESET1",
+					"CAM_VANA1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+	qcom,cam-sensor@2 {
+		cell-index = <2>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x02>;
+		csiphy-sd-index = <2>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		eeprom-src = <&eeprom_front>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1050000 0 3312000 0>;
+		rgltr-max-voltage = <1050000 0 3600000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 15 0>,
+			<&tlmm 9 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2",
+					"CAM_VANA2";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
index b1b81d1..58f5782 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
@@ -22,6 +22,7 @@
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-qvr.dtsi"
 #include "sdm845-qvr-audio-overlay.dtsi"
+#include "sdm845-camera-sensor-qvr.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v2 QVR";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
index c06b806..5513c92 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
@@ -15,6 +15,7 @@
 
 #include "sdm845-v2.dtsi"
 #include "sdm845-qvr.dtsi"
+#include "sdm845-camera-sensor-qvr.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 QVR";
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c8..fc0df0f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -601,8 +601,7 @@
 		break;
 #endif
 	case KVM_CAP_PPC_HTM:
-		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
-		    is_kvmppc_hv_enabled(kvm);
+		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
 		break;
 	default:
 		r = 0;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 1a11c03..f418c8e 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1506,7 +1506,7 @@
 	int cid = fl->cid;
 	int interrupted = 0;
 	int err = 0;
-	struct timespec invoket;
+	struct timespec invoket = {0};
 
 	if (fl->profile)
 		getnstimeofday(&invoket);
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 5699ad6..6377677 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -199,6 +199,7 @@
 
 		found = 1;
 		driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		pr_debug("diag: wake up logging process\n");
 		wake_up_interruptible(&driver->wait_q);
 	}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 36f8546..c6d8b7c 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -25,6 +25,8 @@
 #include <linux/atomic.h>
 #include "diagfwd_bridge.h"
 
+#define THRESHOLD_CLIENT_LIMIT	50
+
 /* Size of the USB buffers used for read and write*/
 #define USB_MAX_OUT_BUF 4096
 #define APPS_BUF_SIZE	4096
@@ -525,6 +527,7 @@
 	wait_queue_head_t wait_q;
 	struct diag_client_map *client_map;
 	int *data_ready;
+	atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
 	int num_clients;
 	int polling_reg_flag;
 	int use_device_tree;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 54e6486..5daa194 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -136,7 +136,6 @@
 
 /* This is the max number of user-space clients supported at initialization*/
 static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
 module_param(max_clients, uint, 0000);
 
 /* Timer variables */
@@ -324,7 +323,7 @@
 		if (i < driver->num_clients) {
 			diag_add_client(i, file);
 		} else {
-			if (i < threshold_client_limit) {
+			if (i < THRESHOLD_CLIENT_LIMIT) {
 				driver->num_clients++;
 				temp = krealloc(driver->client_map
 					, (driver->num_clients) * sizeof(struct
@@ -354,11 +353,17 @@
 			}
 		}
 		driver->data_ready[i] = 0x0;
+		atomic_set(&driver->data_ready_notif[i], 0);
 		driver->data_ready[i] |= MSG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 		driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+		atomic_inc(&driver->data_ready_notif[i]);
 
 		if (driver->ref_count == 0)
 			diag_mempool_init();
@@ -1834,6 +1839,7 @@
 	}
 
 	driver->data_ready[i] |= DEINIT_TYPE;
+	atomic_inc(&driver->data_ready_notif[i]);
 	mutex_unlock(&driver->diagchar_mutex);
 	wake_up_interruptible(&driver->wait_q);
 
@@ -2966,16 +2972,6 @@
 	return 0;
 }
 
-static int check_data_ready(int index)
-{
-	int data_type = 0;
-
-	mutex_lock(&driver->diagchar_mutex);
-	data_type = driver->data_ready[index];
-	mutex_unlock(&driver->diagchar_mutex);
-	return data_type;
-}
-
 static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 			  loff_t *ppos)
 {
@@ -3002,7 +2998,8 @@
 		pr_err("diag: bad address from user side\n");
 		return -EFAULT;
 	}
-	wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
+	wait_event_interruptible(driver->wait_q,
+			atomic_read(&driver->data_ready_notif[index]) > 0);
 
 	mutex_lock(&driver->diagchar_mutex);
 
@@ -3013,6 +3010,7 @@
 		/*Copy the type of data being passed*/
 		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
 		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT)
 			goto exit;
@@ -3029,11 +3027,13 @@
 		 * memory device any more, the condition needs to be cleared.
 		 */
 		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 	}
 
 	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
 		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
 		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
 		if (ret == -EFAULT)
 			goto exit;
@@ -3058,6 +3058,7 @@
 		if (ret == -EFAULT)
 			goto exit;
 		driver->data_ready[index] ^= DEINIT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		mutex_unlock(&driver->diagchar_mutex);
 		diag_remove_client_entry(file);
 		return ret;
@@ -3075,6 +3076,7 @@
 		if (write_len > 0)
 			ret += write_len;
 		driver->data_ready[index] ^= MSG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3101,6 +3103,7 @@
 				goto exit;
 		}
 		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3117,6 +3120,7 @@
 		if (write_len > 0)
 			ret += write_len;
 		driver->data_ready[index] ^= LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3133,6 +3137,7 @@
 		if (ret == -EFAULT)
 			goto exit;
 		driver->data_ready[index] ^= PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		driver->in_busy_pktdata = 0;
 		goto exit;
 	}
@@ -3150,6 +3155,7 @@
 			goto exit;
 
 		driver->data_ready[index] ^= DCI_PKT_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		driver->in_busy_dcipktdata = 0;
 		goto exit;
 	}
@@ -3171,6 +3177,7 @@
 			goto exit;
 
 		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3190,6 +3197,7 @@
 		if (ret == -EFAULT)
 			goto exit;
 		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+		atomic_dec(&driver->data_ready_notif[index]);
 		goto exit;
 	}
 
@@ -3221,6 +3229,7 @@
 			exit_stat = diag_copy_dci(buf, count, entry, &ret);
 			mutex_lock(&driver->diagchar_mutex);
 			driver->data_ready[index] ^= DCI_DATA_TYPE;
+			atomic_dec(&driver->data_ready_notif[index]);
 			mutex_unlock(&driver->diagchar_mutex);
 			if (exit_stat == 1) {
 				mutex_unlock(&driver->dci_mutex);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index cc64f47..25dc91b 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -226,6 +226,7 @@
 			 * situation.
 			 */
 			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			atomic_inc(&driver->data_ready_notif[i]);
 			pr_debug("diag: Force wakeup of logging process\n");
 			wake_up_interruptible(&driver->wait_q);
 			break;
@@ -491,8 +492,10 @@
 
 	mutex_lock(&driver->diagchar_mutex);
 	for (i = 0; i < driver->num_clients; i++)
-		if (driver->client_map[i].pid != 0)
+		if (driver->client_map[i].pid != 0) {
 			driver->data_ready[i] |= type;
+			atomic_inc(&driver->data_ready_notif[i]);
+		}
 	wake_up_interruptible(&driver->wait_q);
 	mutex_unlock(&driver->diagchar_mutex);
 }
@@ -509,6 +512,8 @@
 					driver->client_map[j].pid ==
 					driver->md_session_map[i]->pid) {
 					driver->data_ready[j] |= type;
+					atomic_inc(
+						&driver->data_ready_notif[j]);
 					break;
 				}
 			}
@@ -524,6 +529,7 @@
 	for (i = 0; i < driver->num_clients; i++)
 		if (driver->client_map[i].pid == process_id) {
 			driver->data_ready[i] |= data_type;
+			atomic_inc(&driver->data_ready_notif[i]);
 			break;
 		}
 	wake_up_interruptible(&driver->wait_q);
@@ -1793,6 +1799,9 @@
 	}
 	kmemleak_not_leak(driver->data_ready);
 
+	for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+		atomic_set(&driver->data_ready_notif[i], 0);
+
 	if (driver->apps_req_buf == NULL) {
 		driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
 		if (!driver->apps_req_buf)
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 9943c8c..1dca479 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -31,6 +31,7 @@
 #include "governor.h"
 #include "governor_memlat.h"
 #include <linux/perf_event.h>
+#include <linux/of_device.h>
 
 enum ev_index {
 	INST_IDX,
@@ -63,6 +64,10 @@
 	struct list_head mon_list;
 };
 
+struct memlat_mon_spec {
+	bool is_compute;
+};
+
 #define to_cpustats(cpu_grp, cpu) \
 	(&cpu_grp->cpustats[cpu - cpumask_first(&cpu_grp->cpus)])
 #define to_devstats(cpu_grp, cpu) \
@@ -96,6 +101,9 @@
 	unsigned long ev_count;
 	u64 total, enabled, running;
 
+	if (!event->pevent)
+		return 0;
+
 	total = perf_event_read_value(event->pevent, &enabled, &running);
 	ev_count = total - event->prev_count;
 	event->prev_count = total;
@@ -314,6 +322,7 @@
 	struct device *dev = &pdev->dev;
 	struct memlat_hwmon *hw;
 	struct cpu_grp_info *cpu_grp;
+	const struct memlat_mon_spec *spec;
 	int cpu, ret;
 	u32 event_id;
 
@@ -348,6 +357,22 @@
 
 	cpu_grp->event_ids[CYC_IDX] = CYC_EV;
 
+	for_each_cpu(cpu, &cpu_grp->cpus)
+		to_devstats(cpu_grp, cpu)->id = cpu;
+
+	hw->start_hwmon = &start_hwmon;
+	hw->stop_hwmon = &stop_hwmon;
+	hw->get_cnt = &get_cnt;
+
+	spec = of_device_get_match_data(dev);
+	if (spec && spec->is_compute) {
+		ret = register_compute(dev, hw);
+		if (ret)
+			pr_err("Compute Gov registration failed\n");
+
+		return ret;
+	}
+
 	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
 				   &event_id);
 	if (ret) {
@@ -372,24 +397,21 @@
 	else
 		cpu_grp->event_ids[STALL_CYC_IDX] = event_id;
 
-	for_each_cpu(cpu, &cpu_grp->cpus)
-		to_devstats(cpu_grp, cpu)->id = cpu;
-
-	hw->start_hwmon = &start_hwmon;
-	hw->stop_hwmon = &stop_hwmon;
-	hw->get_cnt = &get_cnt;
-
 	ret = register_memlat(dev, hw);
-	if (ret) {
+	if (ret)
 		pr_err("Mem Latency Gov registration failed\n");
-		return ret;
-	}
 
-	return 0;
+	return ret;
 }
 
+static const struct memlat_mon_spec spec[] = {
+	[0] = { false },
+	[1] = { true },
+};
+
 static const struct of_device_id memlat_match_table[] = {
-	{ .compatible = "qcom,arm-memlat-mon" },
+	{ .compatible = "qcom,arm-memlat-mon", .data = &spec[0] },
+	{ .compatible = "qcom,arm-cpu-mon", .data = &spec[1] },
 	{}
 };
 
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index 9688502..12a90d4 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -48,7 +48,8 @@
 static LIST_HEAD(memlat_list);
 static DEFINE_MUTEX(list_lock);
 
-static int use_cnt;
+static int memlat_use_cnt;
+static int compute_use_cnt;
 static DEFINE_MUTEX(state_lock);
 
 #define show_attr(name) \
@@ -240,8 +241,7 @@
 		if (hw->core_stats[i].mem_count)
 			ratio /= hw->core_stats[i].mem_count;
 
-		if (!hw->core_stats[i].inst_count
-		    || !hw->core_stats[i].freq)
+		if (!hw->core_stats[i].freq)
 			continue;
 
 		trace_memlat_dev_meas(dev_name(df->dev.parent),
@@ -280,16 +280,26 @@
 gov_attr(ratio_ceil, 1U, 10000U);
 gov_attr(stall_floor, 0U, 100U);
 
-static struct attribute *dev_attr[] = {
+static struct attribute *memlat_dev_attr[] = {
 	&dev_attr_ratio_ceil.attr,
 	&dev_attr_stall_floor.attr,
 	&dev_attr_freq_map.attr,
 	NULL,
 };
 
-static struct attribute_group dev_attr_group = {
+static struct attribute *compute_dev_attr[] = {
+	&dev_attr_freq_map.attr,
+	NULL,
+};
+
+static struct attribute_group memlat_dev_attr_group = {
 	.name = "mem_latency",
-	.attrs = dev_attr,
+	.attrs = memlat_dev_attr,
+};
+
+static struct attribute_group compute_dev_attr_group = {
+	.name = "compute",
+	.attrs = compute_dev_attr,
 };
 
 #define MIN_MS	10U
@@ -338,6 +348,12 @@
 	.event_handler = devfreq_memlat_ev_handler,
 };
 
+static struct devfreq_governor devfreq_gov_compute = {
+	.name = "compute",
+	.get_target_freq = devfreq_memlat_get_freq,
+	.event_handler = devfreq_memlat_ev_handler,
+};
+
 #define NUM_COLS	2
 static struct core_dev_map *init_core_dev_map(struct device *dev,
 		char *prop_name)
@@ -380,20 +396,17 @@
 	return tbl;
 }
 
-int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+static struct memlat_node *register_common(struct device *dev,
+					   struct memlat_hwmon *hw)
 {
-	int ret = 0;
 	struct memlat_node *node;
 
 	if (!hw->dev && !hw->of_node)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
 	if (!node)
-		return -ENOMEM;
-
-	node->gov = &devfreq_gov_memlat;
-	node->attr_grp = &dev_attr_group;
+		return ERR_PTR(-ENOMEM);
 
 	node->ratio_ceil = 10;
 	node->hw = hw;
@@ -401,20 +414,68 @@
 	hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
 	if (!hw->freq_map) {
 		dev_err(dev, "Couldn't find the core-dev freq table!\n");
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	mutex_lock(&list_lock);
 	list_add_tail(&node->list, &memlat_list);
 	mutex_unlock(&list_lock);
 
+	return node;
+}
+
+int register_compute(struct device *dev, struct memlat_hwmon *hw)
+{
+	struct memlat_node *node;
+	int ret = 0;
+
+	node = register_common(dev, hw);
+	if (IS_ERR(node)) {
+		ret = PTR_ERR(node);
+		goto out;
+	}
+
 	mutex_lock(&state_lock);
-	if (!use_cnt)
-		ret = devfreq_add_governor(&devfreq_gov_memlat);
+	node->gov = &devfreq_gov_compute;
+	node->attr_grp = &compute_dev_attr_group;
+
+	if (!compute_use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_compute);
 	if (!ret)
-		use_cnt++;
+		compute_use_cnt++;
 	mutex_unlock(&state_lock);
 
+out:
+	if (!ret)
+		dev_info(dev, "Compute governor registered.\n");
+	else
+		dev_err(dev, "Compute governor registration failed!\n");
+
+	return ret;
+}
+
+int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+{
+	struct memlat_node *node;
+	int ret = 0;
+
+	node = register_common(dev, hw);
+	if (IS_ERR(node)) {
+		ret = PTR_ERR(node);
+		goto out;
+	}
+
+	mutex_lock(&state_lock);
+	node->gov = &devfreq_gov_memlat;
+	node->attr_grp = &memlat_dev_attr_group;
+
+	if (!memlat_use_cnt)
+		ret = devfreq_add_governor(&devfreq_gov_memlat);
+	if (!ret)
+		memlat_use_cnt++;
+	mutex_unlock(&state_lock);
+
+out:
 	if (!ret)
 		dev_info(dev, "Memory Latency governor registered.\n");
 	else
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index f2ba534..6491c6c 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -74,10 +74,16 @@
 
 #ifdef CONFIG_DEVFREQ_GOV_MEMLAT
 int register_memlat(struct device *dev, struct memlat_hwmon *hw);
+int register_compute(struct device *dev, struct memlat_hwmon *hw);
 int update_memlat(struct memlat_hwmon *hw);
 #else
 static inline int register_memlat(struct device *dev,
-					struct memlat_hwmon *hw)
+				  struct memlat_hwmon *hw)
+{
+	return 0;
+}
+static inline int register_compute(struct device *dev,
+				   struct memlat_hwmon *hw)
 {
 	return 0;
 }
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 5ca93a6..a300c7f 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -360,11 +360,34 @@
 	return IRQ_HANDLED;
 }
 
+static void initialize_registers(void *info)
+{
+	set_errxctlr_el1();
+	set_errxmisc_overflow();
+}
+
+static void init_regs_on_cpu(bool all_cpus)
+{
+	int cpu;
+
+	write_errselr_el1(0);
+	if (all_cpus) {
+		for_each_possible_cpu(cpu)
+			smp_call_function_single(cpu, initialize_registers,
+						NULL, 1);
+	} else
+		initialize_registers(NULL);
+
+	write_errselr_el1(1);
+	initialize_registers(NULL);
+}
+
 static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
 				unsigned long action, void *v)
 {
 	switch (action) {
 	case CPU_PM_EXIT:
+		init_regs_on_cpu(false);
 		kryo3xx_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
 		kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
 		break;
@@ -373,23 +396,14 @@
 	return NOTIFY_OK;
 }
 
-static void initialize_registers(void *info)
-{
-	set_errxctlr_el1();
-	set_errxmisc_overflow();
-}
-
 static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct erp_drvdata *drv;
 	int rc = 0;
 	int fail = 0;
-	int cpu;
 
-	for_each_possible_cpu(cpu)
-		smp_call_function_single(cpu, initialize_registers, NULL, 1);
-
+	init_regs_on_cpu(true);
 
 	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 08cd0bd..3907439 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -825,7 +825,7 @@
 {
 	uint32_t reference_clock, tmp;
 	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
+	struct cgs_mode_info mode_info = {0};
 
 	info.mode_info = &mode_info;
 
@@ -3718,10 +3718,9 @@
 	uint32_t ref_clock;
 	uint32_t refresh_rate = 0;
 	struct cgs_display_info info = {0};
-	struct cgs_mode_info mode_info;
+	struct cgs_mode_info mode_info = {0};
 
 	info.mode_info = &mode_info;
-
 	cgs_get_active_displays_info(hwmgr->device, &info);
 	num_active_displays = info.display_count;
 
@@ -3737,6 +3736,7 @@
 	frame_time_in_us = 1000000 / refresh_rate;
 
 	pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
 	data->frame_time_x2 = frame_time_in_us * 2 / 100;
 
 	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index dc9d7f4..b4c115d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -376,7 +376,7 @@
 	struct dsi_display *dsi_display = display;
 	struct dsi_panel *panel;
 	u32 status_mode;
-	int rc = 0;
+	int rc = 0x1;
 
 	if (dsi_display == NULL)
 		return -EINVAL;
@@ -385,6 +385,14 @@
 
 	status_mode = panel->esd_config.status_mode;
 
+	mutex_lock(&dsi_display->display_lock);
+
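+	/*
+	 * The panel is not initialized yet: return early without toggling
+	 * clocks or reading status registers (rc is left at its non-zero
+	 * default so this is not reported as an ESD failure).
+	 */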
+	if (!panel->panel_initialized) {
+		pr_debug("Panel not initialized\n");
+		mutex_unlock(&dsi_display->display_lock);
+		return rc;
+	}
+
 	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 		DSI_ALL_CLKS, DSI_CLK_ON);
 
@@ -401,6 +409,7 @@
 
 	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
 		DSI_ALL_CLKS, DSI_CLK_OFF);
+	mutex_unlock(&dsi_display->display_lock);
 
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 653b469..bd77dce 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -4059,10 +4059,6 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
-	for (i = 0; i < cstate->num_connectors; i++)
-		sde_connector_schedule_status_work(cstate->connectors[i],
-							false);
-
 	if (sde_kms_is_suspend_state(crtc->dev))
 		_sde_crtc_set_suspend(crtc, true);
 
@@ -4159,15 +4155,13 @@
 	struct sde_crtc_irq_info *node = NULL;
 	struct drm_event event;
 	u32 power_on;
-	int ret, i;
-	struct sde_crtc_state *cstate;
+	int ret;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	priv = crtc->dev->dev_private;
-	cstate = to_sde_crtc_state(crtc->state);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
@@ -4231,9 +4225,6 @@
 		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
 		SDE_POWER_EVENT_PRE_DISABLE,
 		sde_crtc_handle_power_event, crtc, sde_crtc->name);
-
-	for (i = 0; i < cstate->num_connectors; i++)
-		sde_connector_schedule_status_work(cstate->connectors[i], true);
 }
 
 struct plane_state {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 28eb4e6..b3e3077 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -2267,6 +2267,7 @@
 	struct msm_compression_info *comp_info = NULL;
 	struct drm_display_mode *cur_mode = NULL;
 	struct msm_mode_info mode_info;
+	struct drm_connector *drm_conn = NULL;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -2342,6 +2343,10 @@
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
 
 	_sde_encoder_virt_enable_helper(drm_enc);
+
+	/* Enable ESD thread */
+	drm_conn = sde_enc->cur_master->connector;
+	sde_connector_schedule_status_work(drm_conn, true);
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -2349,6 +2354,7 @@
 	struct sde_encoder_virt *sde_enc = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct drm_connector *drm_conn = NULL;
 	int i = 0;
 
 	if (!drm_enc) {
@@ -2370,6 +2376,10 @@
 
 	SDE_EVT32(DRMID(drm_enc));
 
+	/* Disable ESD thread */
+	drm_conn = sde_enc->cur_master->connector;
+	sde_connector_schedule_status_work(drm_conn, false);
+
 	/* wait for idle */
 	sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index dac07b7..9cc3d5f 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -156,7 +156,7 @@
 	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
 	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
 	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
-	{A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
 	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
 	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
 	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 681dce1..b8c50d8 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,7 @@
 	{ "ELAN0605", 0 },
 	{ "ELAN0609", 0 },
 	{ "ELAN060B", 0 },
+	{ "ELAN0611", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index abf09ac..339a0e2 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@
 
 	/* Walk  this report and pull out the info we need */
 	while (i < length) {
-		prefix = report[i];
-
-		/* Skip over prefix */
-		i++;
+		prefix = report[i++];
 
 		/* Determine data size and save the data in the proper variable */
-		size = PREF_SIZE(prefix);
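+		/*
+		 * The 2-bit HID item size field encodes 0, 1, 2 or 4 data
+		 * bytes; (1 << n) >> 1 decodes it (mapping 3 to 4) and the
+		 * length check below keeps the read inside the report.
+		 */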
+		size = (1U << PREF_SIZE(prefix)) >> 1;
+		if (i + size > length) {
+			dev_err(ddev,
+				"Not enough data (need %d, have %d)\n",
+				i + size, length);
+			break;
+		}
+
 		switch (size) {
 		case 1:
 			data = report[i];
@@ -244,8 +248,7 @@
 		case 2:
 			data16 = get_unaligned_le16(&report[i]);
 			break;
-		case 3:
-			size = 4;
+		case 4:
 			data32 = get_unaligned_le32(&report[i]);
 			break;
 		}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index bd073a1..66099e2 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -1269,14 +1269,14 @@
 	if (!soc_info)
 		return -EINVAL;
 
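+	/* mask the IRQ first so its handler cannot run with clocks off */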
+	if (disble_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
 	if (disable_clocks)
 		cam_soc_util_clk_disable_default(soc_info);
 
 	cam_soc_util_regulator_disable_default(soc_info);
 
-	if (disble_irq)
-		rc |= cam_soc_util_irq_disable(soc_info);
-
 	if (soc_info->pinctrl_info.pinctrl &&
 		soc_info->pinctrl_info.gpio_state_suspend)
 		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4..b0c8085 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@
 
 	/* enter the selected mode */
 	mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
-	if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
 		mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 		mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
 				       CAN_CTRLMODE_LISTENONLY |
 				       CAN_CTRLMODE_LOOPBACK |
-				       CAN_CTRLMODE_PRESUME_ACK |
 				       CAN_CTRLMODE_3_SAMPLES;
 	priv->base = addr;
 	priv->clk = clk;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d51e0c4..4224e06 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@
 #define CMD_RESET_ERROR_COUNTER		49
 #define CMD_TX_ACKNOWLEDGE		50
 #define CMD_CAN_ERROR_EVENT		51
+#define CMD_FLUSH_QUEUE_REPLY		68
 
 #define CMD_LEAF_USB_THROTTLE		77
 #define CMD_LEAF_LOG_MESSAGE		106
@@ -1301,6 +1302,11 @@
 			goto warn;
 		break;
 
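+	/*
+	 * Leaf firmware replies to CMD_FLUSH_QUEUE; accept the reply
+	 * silently instead of warning about an unhandled message.
+	 */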
+	case CMD_FLUSH_QUEUE_REPLY:
+		if (dev->family != KVASER_LEAF)
+			goto warn;
+		break;
+
 	default:
 warn:		dev_warn(dev->udev->dev.parent,
 			 "Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@
 	if (err)
 		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
 
-	if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+	if (err)
 		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
 
 	err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 7aa7ffd..a249567 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -86,7 +86,9 @@
 	__stringify(ADD_VLAN_IFACE),
 	__stringify(DEL_VLAN_IFACE),
 	__stringify(ADD_L2TP_VLAN_MAPPING),
-	__stringify(DEL_L2TP_VLAN_MAPPING)
+	__stringify(DEL_L2TP_VLAN_MAPPING),
+	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
 };
 
 const char *ipa_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
index dd59140..5228b2d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1436,6 +1436,66 @@
 			start_ipv6_filter_idx),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.is_array	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index fb7b3a7..d3c2ca3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1016,6 +1016,49 @@
 			break;
 		}
 		break;
+
+	case IPA_IOC_ADD_RT_RULE_EXT:
+		if (copy_from_user(header,
+				(const void __user *)arg,
+				sizeof(struct ipa_ioc_add_rt_rule_ext))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule_ext) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add_ext);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(
+			((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR(" prevent memory corruption(%d not match %d)\n",
+				((struct ipa_ioc_add_rt_rule_ext *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EINVAL;
+			break;
+		}
+		if (ipa3_add_rt_rule_ext(
+			(struct ipa_ioc_add_rt_rule_ext *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
 	case IPA_IOC_ADD_RT_RULE_AFTER:
 		if (copy_from_user(header, (const void __user *)arg,
 			sizeof(struct ipa_ioc_add_rt_rule_after))) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 1539a0e..5da83e5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -70,7 +70,9 @@
 	__stringify(ADD_VLAN_IFACE),
 	__stringify(DEL_VLAN_IFACE),
 	__stringify(ADD_L2TP_VLAN_MAPPING),
-	__stringify(DEL_L2TP_VLAN_MAPPING)
+	__stringify(DEL_L2TP_VLAN_MAPPING),
+	__stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+	__stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
 };
 
 const char *ipa3_hdr_l2_type_name[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index f764a61..ad925c5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -433,6 +433,7 @@
 	int id;
 	u16 prio;
 	u16 rule_id;
+	u16 rule_id_valid;
 };
 
 /**
@@ -1760,6 +1761,8 @@
  */
 int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
 
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
+
 int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
 
 int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index e3a3821..1c8715a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -758,6 +758,57 @@
 		resp.resp.error, "ipa_install_filter");
 }
 
+/* sending ul-filter-install-request to modem */
+int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+	struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	IPAWANDBG("IPACM pass %u rules to Q6\n",
+		req->firewall_rules_list_len);
+
+	mutex_lock(&ipa3_qmi_lock);
+	if (ipa3_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(
+		&(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]),
+		req,
+		sizeof(struct
+		ipa_configure_ul_firewall_rules_req_msg_v01));
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++;
+		ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa3_qmi_lock);
+
+	req_desc.max_msg_len =
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
+	req_desc.ei_array =
+		ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei;
+
+	memset(&resp, 0,
+		sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+		req,
+		sizeof(
+		struct ipa_configure_ul_firewall_rules_req_msg_v01),
+		&resp_desc, &resp, sizeof(resp),
+		QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_received_ul_firewall_filter");
+}
+
 int ipa3_qmi_enable_force_clear_datapath_send(
 	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
 {
@@ -967,6 +1018,7 @@
 			       void *ind_cb_priv)
 {
 	struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+	struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind;
 	struct msg_desc qmi_ind_desc;
 	int rc = 0;
 
@@ -995,6 +1047,36 @@
 		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
 			IPA_UPSTEAM_MODEM);
 	}
+
+	if (msg_id == QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01) {
+		memset(&qmi_ul_firewall_ind, 0, sizeof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+		qmi_ind_desc.max_msg_len =
+			QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01;
+		qmi_ind_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01;
+		qmi_ind_desc.ei_array =
+			ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei;
+
+		rc = qmi_kernel_decode(
+			&qmi_ind_desc, &qmi_ul_firewall_ind, msg, msg_len);
+		if (rc < 0) {
+			IPAWANERR("Error decoding msg_id %d\n", msg_id);
+			return;
+		}
+
+		IPAWANDBG("UL firewall rules install indication on Q6");
+		if (qmi_ul_firewall_ind.result.is_success ==
+				QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) {
+			IPAWANDBG(" : Success\n");
+			IPAWANDBG("Mux ID : %d\n",
+				qmi_ul_firewall_ind.result.mux_id);
+		} else if (qmi_ul_firewall_ind.result.is_success ==
+				QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01){
+			IPAWANERR(": Failure\n");
+		} else {
+			IPAWANERR(": Unexpected Result");
+		}
+	}
 }
 
 static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
@@ -1446,6 +1528,74 @@
 		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
 }
 
+int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01;
+	req_desc.ei_array =
+		ipa3_enable_per_client_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id =
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_enable_per_client_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+		sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+		&resp_desc, resp,
+		sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa3_qmi_enable_per_client_stats");
+}
+
+int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01;
+	req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01;
+	resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
+
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result,
+		resp->resp.error,
+		"struct ipa_get_stats_per_client_req_msg_v01");
+}
+
 void ipa3_qmi_init(void)
 {
 	mutex_init(&ipa3_qmi_lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d3a4ba0..3351a33 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -32,54 +32,62 @@
 
 #define IPAWANDBG(fmt, args...) \
 	do { \
-		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 
 #define IPAWANDBG_LOW(fmt, args...) \
 	do { \
-		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 #define IPAWANERR(fmt, args...) \
 	do { \
-		pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_err(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 #define IPAWANINFO(fmt, args...) \
 	do { \
-		pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		pr_info(DEV_NAME " %s:%d " fmt, __func__,\
+				__LINE__, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
-			DEV_NAME " %s:%d " fmt, ## args); \
+				DEV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
 extern struct ipa3_qmi_context *ipa3_qmi_ctx;
 
 struct ipa3_qmi_context {
-struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
-u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
-int num_ipa_install_fltr_rule_req_msg;
-struct ipa_install_fltr_rule_req_msg_v01
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
 		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_install_fltr_rule_req_ex_msg;
-struct ipa_install_fltr_rule_req_ex_msg_v01
+	int num_ipa_install_fltr_rule_req_ex_msg;
+	struct ipa_install_fltr_rule_req_ex_msg_v01
 		ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_fltr_installed_notif_req_msg;
-struct ipa_fltr_installed_notif_req_msg_v01
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
 		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-bool modem_cfg_emb_pipe_flt;
+	int num_ipa_configure_ul_firewall_rules_req_msg;
+	struct ipa_configure_ul_firewall_rules_req_msg_v01
+		ipa_configure_ul_firewall_rules_req_msg_cache
+			[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
 };
 
 struct ipa3_rmnet_mux_val {
@@ -95,16 +103,24 @@
 extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
 extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
 extern struct elem_info
 	ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+
 extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
@@ -112,14 +128,44 @@
 extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
 extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
 extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_ul_firewall_rule_type_data_v01_ei[];
+extern struct elem_info
+	ipa3_ul_firewall_config_result_type_data_v01_ei[];
+extern struct elem_info
+	ipa3_per_client_stats_info_type_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+
+extern struct elem_info
+	ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
 
 /**
  * struct ipa3_rmnet_context - IPA rmnet context
@@ -148,6 +194,9 @@
 int ipa3_qmi_filter_request_ex_send(
 	struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
 
+int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
+
 /* sending filter-installed-notify-request to modem*/
 int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
 		*req);
@@ -194,6 +243,16 @@
 	struct wan_ioctl_query_tether_stats_all *data);
 
 int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
+
+int rmnet_ipa3_enable_per_client_stats(bool *data);
+
+int rmnet_ipa3_query_per_client_stats(
+	struct wan_ioctl_query_per_client_stats *data);
 
 int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
 	struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -210,6 +269,13 @@
 int ipa3_wwan_set_modem_perf_profile(int throughput);
 
 int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state);
+int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp);
 
 void ipa3_qmi_init(void);
 
@@ -231,6 +297,12 @@
 	return -EPERM;
 }
 
+static inline int ipa3_qmi_ul_filter_request_send(
+	struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
 static inline int ipa3_qmi_filter_request_ex_send(
 	struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
 {
@@ -328,16 +400,28 @@
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
 static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
 {
 	return -EPERM;
 }
 
+static inline int ipa3_qmi_enable_per_client_stats(
+	struct ipa_enable_per_client_stats_req_msg_v01 *req,
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_get_per_client_packet_stats(
+	struct ipa_get_stats_per_client_req_msg_v01 *req,
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
 
 static inline void ipa3_qmi_init(void)
 {
+
 }
 
 static inline void ipa3_qmi_cleanup(void)
 {
+
 }
 
 #endif /* CONFIG_RMNET_IPA3 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index d2d4158..703acd7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -16,6 +16,8 @@
 
 #include <soc/qcom/msm_qmi_interface.h>
 
+#include "ipa_qmi_service.h"
+
 /* Type Definitions  */
 static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
 	{
@@ -1756,6 +1758,36 @@
 			rule_id),
 	},
 	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			dst_pipe_id),
+	},
+	{
 		.data_type	= QMI_EOTI,
 		.is_array	= NO_ARRAY,
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
@@ -2923,3 +2955,432 @@
 		.tlv_type	= QMI_COMMON_TLV_TYPE,
 	},
 };
+
+struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				client_id),
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				src_pipe_id),
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv4_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv6_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv4_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_8_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint64_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv6_bytes),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv4_pkts),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_ul_ipv6_pkts),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv4_pkts),
+
+	},
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_per_client_stats_info_type_v01,
+				num_dl_ipv6_pkts),
+
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_ul_firewall_rule_type_v01,
+				ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ul_firewall_rule_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+	{
+			.data_type	= QMI_UNSIGNED_4_BYTE,
+			.elem_len	= 1,
+			.elem_size	= sizeof(uint32_t),
+			.is_array	= NO_ARRAY,
+			.tlv_type	= QMI_COMMON_TLV_TYPE,
+			.offset		= offsetof(
+				struct ipa_ul_firewall_config_result_type_v01,
+				is_success),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ul_firewall_config_result_type_v01,
+			mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+	{
+				.data_type	= QMI_UNSIGNED_1_BYTE,
+				.elem_len	= 1,
+				.elem_size	= sizeof(uint8_t),
+				.is_array	= NO_ARRAY,
+				.tlv_type	= 0x01,
+				.offset		= offsetof(struct
+				ipa_enable_per_client_stats_req_msg_v01,
+				enable_per_client_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_per_client_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			client_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			src_pipe_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PER_CLIENTS_V01,
+		.elem_size	=
+			sizeof(struct ipa_per_client_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_stats_per_client_resp_msg_v01,
+			per_client_stats_list),
+		.ei_array	=
+			ipa3_per_client_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			firewall_rules_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+		.elem_size	= sizeof(struct ipa_ul_firewall_rule_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x1,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			firewall_rules_list),
+		.ei_array	=
+			ipa3_ul_firewall_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x2,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			disable_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			disable),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			are_blacklist_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_req_msg_v01,
+			are_blacklist_filters),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_ul_firewall_config_result_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+			result),
+		.ei_array	=
+		ipa3_ul_firewall_config_result_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index edba283..2536bf4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -918,7 +918,8 @@
 static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
 		const struct ipa_rt_rule *rule,
 		struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
-		struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx,
+		u16 rule_id)
 {
 	int id;
 
@@ -933,11 +934,16 @@
 	(*(entry))->tbl = tbl;
 	(*(entry))->hdr = hdr;
 	(*(entry))->proc_ctx = proc_ctx;
-	id = ipa3_alloc_rule_id(tbl->rule_ids);
-	if (id < 0) {
-		IPAERR("failed to allocate rule id\n");
-		WARN_ON(1);
-		goto alloc_rule_id_fail;
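+	/*
+	 * A caller-supplied (non-zero) rule id, e.g. from ADD_RT_RULE_EXT,
+	 * is used as-is and flagged so it is not returned to the idr when
+	 * the rule is deleted.
+	 */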
+	if (rule_id) {
+		id = rule_id;
+		(*(entry))->rule_id_valid = 1;
+	} else {
+		id = ipa3_alloc_rule_id(tbl->rule_ids);
+		if (id < 0) {
+			IPAERR("failed to allocate rule id\n");
+			WARN_ON(1);
+			goto alloc_rule_id_fail;
+		}
 	}
 	(*(entry))->rule_id = id;
 
@@ -984,7 +990,8 @@
 }
 
 static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
-		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+		u16 rule_id)
 {
 	struct ipa3_rt_tbl *tbl;
 	struct ipa3_rt_entry *entry;
@@ -1012,7 +1019,8 @@
 		goto error;
 	}
 
-	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
+		rule_id))
 		goto error;
 
 	if (at_rear)
@@ -1043,7 +1051,7 @@
 	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
 		goto error;
 
-	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0))
 		goto error;
 
 	list_add(&entry->link, &((*add_after_entry)->link));
@@ -1088,8 +1096,54 @@
 		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
 					&rules->rules[i].rule,
 					rules->rules[i].at_rear,
-					&rules->rules[i].rt_rule_hdl)) {
-			IPAERR_RL("failed to add rt rule %d\n", i);
+					&rules->rules[i].rt_rule_hdl,
+					0)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
+ * and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl,
+					rules->rules[i].rule_id)) {
+			IPAERR("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1237,7 +1291,9 @@
 	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
 		entry->tbl->idx, entry->tbl->rule_cnt,
 		entry->rule_id, entry->tbl->ref_cnt);
-	idr_remove(entry->tbl->rule_ids, entry->rule_id);
+	/* if rule id was allocated from idr, remove it */
+	if (!entry->rule_id_valid)
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
 	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
 		if (__ipa_del_rt_tbl(entry->tbl))
 			IPAERR_RL("fail to del RT tbl\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index f94a342..66d4b10 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -68,6 +68,9 @@
 
 #define IPA_WWAN_CONS_DESC_FIFO_SZ 256
 
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type);
+static void rmnet_ipa_get_stats_and_update(void);
+
 static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
 static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
 static void ipa3_wwan_msg_free_cb(void*, u32, u32);
@@ -145,6 +148,10 @@
 	u32 pm_hdl;
 	u32 q6_pm_hdl;
 	u32 q6_teth_pm_hdl;
+	struct mutex per_client_stats_guard;
+	struct ipa_tether_device_info
+		tether_device[IPACM_MAX_CLIENT_DEVICE_TYPES];
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1950,12 +1957,6 @@
 	struct ipa_rm_perf_profile profile;
 	int ret;
 
-	ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl, throughput);
-	if (ret)
-		return ret;
-	return ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_teth_pm_hdl,
-		throughput);
-
 	if (ipa3_ctx->use_ipa_pm) {
 		ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl,
 			throughput);
@@ -2745,9 +2746,11 @@
 	}
 
 	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
-		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+			type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS &&
+			type != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+			type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
 		IPAWANERR("Wrong type given. buff %p type %d\n",
-			  buff, type);
+				buff, type);
 	}
 	kfree(buff);
 }
@@ -3505,8 +3508,488 @@
 	}
 }
 
+static inline bool rmnet_ipa3_check_any_client_inited
+(
+	enum ipacm_per_client_device_type device_type
+)
+{
+	int i = 0;
+
+	for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+		if (rmnet_ipa3_ctx->tether_device[device_type].
+		lan_client[i].client_idx != -1 &&
+		rmnet_ipa3_ctx->tether_device[device_type].
+		lan_client[i].inited) {
+			IPAWANERR("Found client index: %d which is inited\n",
+				 i);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static inline int rmnet_ipa3_get_lan_client_info
+(
+	enum ipacm_per_client_device_type device_type,
+	uint8_t mac[]
+)
+{
+	int i = 0;
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		mac[0], mac[1], mac[2],
+		mac[3], mac[4], mac[5]);
+
+	for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+		if (memcmp(
+		rmnet_ipa3_ctx->tether_device[device_type].
+		lan_client[i].mac,
+		mac,
+		IPA_MAC_ADDR_SIZE) == 0) {
+			IPAWANDBG("Matched client index: %d\n", i);
+			return i;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static inline int rmnet_ipa3_delete_lan_client_info
+(
+	enum ipacm_per_client_device_type device_type,
+	int lan_clnt_idx
+)
+{
+	struct ipa_lan_client *lan_client = NULL;
+	int i;
+
+	/* Check if the request is to clean up all clients. */
+	if (lan_clnt_idx == 0xffffffff) {
+		/* Reset the complete device info. */
+		memset(&rmnet_ipa3_ctx->tether_device[device_type], 0,
+				sizeof(struct ipa_tether_device_info));
+		rmnet_ipa3_ctx->tether_device[device_type].ul_src_pipe = -1;
+		for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++)
+			rmnet_ipa3_ctx->tether_device[device_type].
+				lan_client[i].client_idx = -1;
+	} else {
+		lan_client =
+			&rmnet_ipa3_ctx->tether_device[device_type].
+			lan_client[lan_clnt_idx];
+		/* Reset the client info before sending the message. */
+		memset(lan_client, 0, sizeof(struct ipa_lan_client));
+		lan_client->client_idx = -1;
+
+	}
+	return 0;
+}
+
+/* rmnet_ipa3_set_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_LAN_CLIENT_INFO.
+ * It is used to store LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_set_lan_client_info(
+	struct wan_ioctl_lan_client_info *data)
+{
+
+	struct ipa_lan_client *lan_client = NULL;
+
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		data->mac[0], data->mac[1], data->mac[2],
+		data->mac[3], data->mac[4], data->mac[5]);
+
+	/* Check if Device type is valid. */
+	if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		data->device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", data->device_type);
+		return -EINVAL;
+	}
+
+	/* Check if Client index is valid. */
+	if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+		data->client_idx < 0) {
+		IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+	if (data->client_init) {
+		/* check if the client is already inited. */
+		if (rmnet_ipa3_ctx->tether_device[data->device_type]
+			.lan_client[data->client_idx].inited) {
+			IPAWANERR("Client already inited: %d:%d\n",
+				data->device_type, data->client_idx);
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EINVAL;
+		}
+	}
+
+	lan_client =
+	&rmnet_ipa3_ctx->tether_device[data->device_type].
+	lan_client[data->client_idx];
+
+	memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE);
+
+	lan_client->client_idx = data->client_idx;
+
+	/* Update the Source pipe. */
+	rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe =
+			ipa3_get_ep_mapping(data->ul_src_pipe);
+
+	/* Update the header length if not set. */
+	if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len)
+		rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len =
+			data->hdr_len;
+
+	lan_client->inited = true;
+
+	rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++;
+
+	IPAWANDBG("Set the lan client info: %d, %d, %d\n",
+		lan_client->client_idx,
+		rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe,
+		rmnet_ipa3_ctx->tether_device[data->device_type].num_clients);
+
+	mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+	return 0;
+}
+
+/* rmnet_ipa3_delete_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_DELETE_LAN_CLIENT_INFO.
+ * It is used to delete LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_clear_lan_client_info(
+	struct wan_ioctl_lan_client_info *data)
+{
+
+	struct ipa_lan_client *lan_client = NULL;
+
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		data->mac[0], data->mac[1], data->mac[2],
+		data->mac[3], data->mac[4], data->mac[5]);
+
+	/* Check if Device type is valid. */
+	if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		data->device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", data->device_type);
+		return -EINVAL;
+	}
+
+	/* Check if Client index is valid. */
+	if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+		data->client_idx < 0) {
+		IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+	lan_client =
+	&rmnet_ipa3_ctx->tether_device[data->device_type].
+	lan_client[data->client_idx];
+
+	if (!data->client_init) {
+		/* check if the client is already de-inited. */
+		if (!lan_client->inited) {
+			IPAWANERR("Client already de-inited: %d:%d\n",
+				data->device_type, data->client_idx);
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EINVAL;
+		}
+	}
+
+	lan_client->inited = false;
+	mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+	return 0;
+}
+
+
+/* rmnet_ipa3_send_lan_client_msg() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG.
+ * It is used to send LAN client information to IPACM.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_send_lan_client_msg(
+	struct wan_ioctl_send_lan_client_msg *data)
+{
+	struct ipa_msg_meta msg_meta;
+	int rc;
+	struct ipa_lan_client_msg *lan_client;
+
+	/* Notify IPACM to reset the client index. */
+	lan_client = kzalloc(sizeof(struct ipa_lan_client_msg),
+		       GFP_KERNEL);
+	if (!lan_client) {
+		IPAWANERR("Can't allocate memory for lan_client msg\n");
+		return -ENOMEM;
+	}
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	memcpy(lan_client, &data->lan_client,
+		sizeof(struct ipa_lan_client_msg));
+	msg_meta.msg_type = data->client_event;
+	msg_meta.msg_len = sizeof(struct ipa_lan_client_msg);
+
+	rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa_send_msg failed: %d\n", rc);
+		kfree(lan_client);
+		return rc;
+	}
+	return 0;
+}
+
+/* rmnet_ipa3_enable_per_client_stats() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS.
+ * It is used to indicate Q6 to start capturing per client stats.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_enable_per_client_stats(
+	bool *data)
+{
+	struct ipa_enable_per_client_stats_req_msg_v01 *req;
+	struct ipa_enable_per_client_stats_resp_msg_v01 *resp;
+	int rc;
+
+	req =
+	kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	resp =
+	kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0,
+		sizeof(struct ipa_enable_per_client_stats_req_msg_v01));
+	memset(resp, 0,
+		sizeof(struct ipa_enable_per_client_stats_resp_msg_v01));
+
+	if (*data)
+		req->enable_per_client_stats = 1;
+	else
+		req->enable_per_client_stats = 0;
+
+	rc = ipa3_qmi_enable_per_client_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't enable per client stats\n");
+		kfree(req);
+		kfree(resp);
+		return rc;
+	}
+
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
+
+int rmnet_ipa3_query_per_client_stats(
+	struct wan_ioctl_query_per_client_stats *data)
+{
+	struct ipa_get_stats_per_client_req_msg_v01 *req;
+	struct ipa_get_stats_per_client_resp_msg_v01 *resp;
+	int rc, lan_clnt_idx, lan_clnt_idx1, i;
+	struct ipa_lan_client *lan_client = NULL;
+
+
+	IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+		data->client_info[0].mac[0],
+		data->client_info[0].mac[1],
+		data->client_info[0].mac[2],
+		data->client_info[0].mac[3],
+		data->client_info[0].mac[4],
+		data->client_info[0].mac[5]);
+
+	/* Check if Device type is valid. */
+	if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+		data->device_type < 0) {
+		IPAWANERR("Invalid Device type: %d\n", data->device_type);
+		return -EINVAL;
+	}
+
+	/* Check if num_clients is valid. */
+	if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS &&
+		data->num_clients != 1) {
+		IPAWANERR("Invalid number of clients: %d\n", data->num_clients);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+	if (data->num_clients == 1) {
+		/* Check if the client info is valid.*/
+		lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info(
+			data->device_type,
+			data->client_info[0].mac);
+		if (lan_clnt_idx1 < 0) {
+			IPAWANERR("Client info not available, returning\n");
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EINVAL;
+		}
+		lan_client =
+			&rmnet_ipa3_ctx->tether_device[data->device_type].
+			lan_client[lan_clnt_idx1];
+		/*
+		 * Check if disconnect flag is set and
+		 * see if all the clients info are cleared.
+		 */
+		if (data->disconnect_clnt &&
+			lan_client->inited) {
+			IPAWANERR("Client still inited. Try again.\n");
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EAGAIN;
+		}
+
+	} else {
+		/* Max number of clients. */
+		/* Check if disconnect flag is set and
+		 * see if all the clients info are cleared.
+		 */
+		if (data->disconnect_clnt &&
+			rmnet_ipa3_check_any_client_inited(data->device_type)) {
+			IPAWANERR("Clients still inited. Try again.\n");
+			mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+			return -EAGAIN;
+		}
+		lan_clnt_idx1 = 0xffffffff;
+	}
+
+	req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		return -ENOMEM;
+	}
+	resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01));
+
+	if (data->reset_stats) {
+		req->reset_stats_valid = true;
+		req->reset_stats = true;
+		IPAWANDBG("fetch and reset the client stats\n");
+	}
+
+	req->client_id = lan_clnt_idx1;
+	req->src_pipe_id =
+		rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe;
+
+	IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id,
+		req->src_pipe_id);
+
+	rc = ipa3_qmi_get_per_client_packet_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't get per client stats\n");
+		mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+		kfree(req);
+		kfree(resp);
+		return rc;
+	}
+
+	if (resp->per_client_stats_list_valid) {
+		for (i = 0; i < resp->per_client_stats_list_len
+				&& i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+			/* Subtract the header bytes from the DL bytes. */
+			data->client_info[i].ipv4_rx_bytes =
+			(resp->per_client_stats_list[i].num_dl_ipv4_bytes) -
+			(rmnet_ipa3_ctx->
+			tether_device[data->device_type].hdr_len *
+			resp->per_client_stats_list[i].num_dl_ipv4_pkts);
+			/* UL header bytes are subtracted by Q6. */
+			data->client_info[i].ipv4_tx_bytes =
+			resp->per_client_stats_list[i].num_ul_ipv4_bytes;
+			/* Subtract the header bytes from the DL bytes. */
+			data->client_info[i].ipv6_rx_bytes =
+			(resp->per_client_stats_list[i].num_dl_ipv6_bytes) -
+			(rmnet_ipa3_ctx->
+			tether_device[data->device_type].hdr_len *
+			resp->per_client_stats_list[i].num_dl_ipv6_pkts);
+			/* UL header bytes are subtracted by Q6. */
+			data->client_info[i].ipv6_tx_bytes =
+			resp->per_client_stats_list[i].num_ul_ipv6_bytes;
+
+			IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n",
+			(unsigned long int) data->client_info[i].ipv4_tx_bytes,
+			(unsigned long int) data->client_info[i].ipv6_tx_bytes,
+			(unsigned long int) data->client_info[i].ipv4_rx_bytes,
+			(unsigned long int) data->client_info[i].ipv6_rx_bytes);
+
+			/* Get the lan client index. */
+			lan_clnt_idx = resp->per_client_stats_list[i].client_id;
+			/* Check if lan_clnt_idx is valid. */
+			if (lan_clnt_idx < 0 ||
+				lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) {
+				IPAWANERR("Lan client index not valid.\n");
+				mutex_unlock(
+				&rmnet_ipa3_ctx->per_client_stats_guard);
+				kfree(req);
+				kfree(resp);
+				ipa_assert();
+				return -EINVAL;
+			}
+			memcpy(data->client_info[i].mac,
+				rmnet_ipa3_ctx->
+				tether_device[data->device_type].
+				lan_client[lan_clnt_idx].mac,
+				IPA_MAC_ADDR_SIZE);
+		}
+	}
+
+	if (data->disconnect_clnt) {
+		rmnet_ipa3_delete_lan_client_info(data->device_type,
+		lan_clnt_idx1);
+	}
+
+	mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
+
 static int __init ipa3_wwan_init(void)
 {
+	int i, j;
 	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
 	if (!rmnet_ipa3_ctx) {
 		IPAWANERR("no memory\n");
@@ -3518,6 +4001,14 @@
 
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
 	mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
+	mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard);
+	/* Reset the Lan Stats. */
+	for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) {
+		rmnet_ipa3_ctx->tether_device[i].ul_src_pipe = -1;
+		for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++)
+			rmnet_ipa3_ctx->tether_device[i].
+				lan_client[j].client_idx = -1;
+	}
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
 
@@ -3540,6 +4031,7 @@
 	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
 	mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
+	mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
 	if (ret)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 2e43abf..0f85e12 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -53,6 +53,15 @@
 #define WAN_IOC_NOTIFY_WAN_STATE32 _IOWR(WAN_IOC_MAGIC, \
 		WAN_IOCTL_NOTIFY_WAN_STATE, \
 		compat_uptr_t)
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+			compat_uptr_t)
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+			compat_uptr_t)
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+			compat_uptr_t)
 #endif
 
 static unsigned int dev_num = 1;
@@ -128,6 +137,33 @@
 		}
 		break;
 
+	case WAN_IOC_ADD_UL_FLT_RULE:
+		IPAWANDBG("device %s got WAN_IOC_ADD_UL_FLT_RULE :>>>\n",
+		DRIVER_NAME);
+		pyld_sz =
+		sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_qmi_ul_filter_request_send(
+			(struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+			param)) {
+			IPAWANERR("IPACM->Q6 add ul filter rule failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
 	case WAN_IOC_ADD_FLT_RULE_INDEX:
 		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
 		DRIVER_NAME);
@@ -339,7 +375,115 @@
 			retval = -EFAULT;
 			break;
 		}
+
 		break;
+	case WAN_IOC_ENABLE_PER_CLIENT_STATS:
+		IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
+		pyld_sz = sizeof(bool);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_enable_per_client_stats(
+			(bool *)param)) {
+			IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case WAN_IOC_QUERY_PER_CLIENT_STATS:
+		IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		retval = rmnet_ipa3_query_per_client_stats(
+			(struct wan_ioctl_query_per_client_stats *)param);
+		if (retval) {
+			IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n");
+			break;
+		}
+
+		if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_LAN_CLIENT_INFO:
+		IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_set_lan_client_info(
+			(struct wan_ioctl_lan_client_info *)param)) {
+			IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_CLEAR_LAN_CLIENT_INFO:
+		IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_clear_lan_client_info(
+			(struct wan_ioctl_lan_client_info *)param)) {
+			IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SEND_LAN_CLIENT_MSG:
+		IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (const void __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_send_lan_client_msg(
+			(struct wan_ioctl_send_lan_client_msg *)
+			param)) {
+			IPAWANERR("IOC_SEND_LAN_CLIENT_MSG failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
 	default:
 		retval = -ENOTTY;
 	}
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 13d53a3..589167e 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -491,7 +491,10 @@
 		.name = "fan53555",
 		.driver_data = FAN53555_VENDOR_FAIRCHILD
 	}, {
-		.name = "syr82x",
+		.name = "syr827",
+		.driver_data = FAN53555_VENDOR_SILERGY
+	}, {
+		.name = "syr828",
 		.driver_data = FAN53555_VENDOR_SILERGY
 	}, {
 		.name = "hl7509",
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
index 4c03dec..e22a259 100644
--- a/drivers/regulator/mem-acc-regulator.c
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -108,6 +108,8 @@
 	u32			*phys_reg_addr_list;
 	void __iomem		**remap_reg_addr_list;
 	struct corner_acc_reg_config	*corner_acc_reg_config;
+	u32			*override_acc_range_fuse_list;
+	int			override_acc_range_fuse_num;
 };
 
 static DEFINE_MUTEX(mem_acc_memory_mutex);
@@ -549,9 +551,8 @@
 	return 0;
 }
 
-static int override_mem_acc_custom_data(struct platform_device *pdev,
-				 struct mem_acc_regulator *mem_acc_vreg,
-				 int mem_type)
+static int override_mem_acc_custom_data(struct mem_acc_regulator *mem_acc_vreg,
+		 int mem_type)
 {
 	char *custom_apc_data_str;
 	int len, rc = 0, i;
@@ -647,27 +648,48 @@
 
 }
 
-static int mem_acc_find_override_map_match(struct platform_device *pdev,
-				 struct mem_acc_regulator *mem_acc_vreg)
+static void mem_acc_read_efuse_param(struct mem_acc_regulator *mem_acc_vreg,
+		u32 *fuse_sel, int *val)
 {
-	struct device_node *of_node = pdev->dev.of_node;
+	u64 fuse_bits;
+
+	fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+					   fuse_sel[3]);
+	/*
+	 * fuse_sel[1] = LSB position in row (shift)
+	 * fuse_sel[2] = num of bits (mask)
+	 */
+	*val = (fuse_bits >> fuse_sel[1]) & ((1 << fuse_sel[2]) - 1);
+}
+
+#define FUSE_TUPLE_SIZE 4
+static int mem_acc_parse_override_fuse_version_map(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
 	int i, rc, tuple_size;
 	int len = 0;
 	u32 *tmp;
-	char *prop_str = "qcom,override-fuse-version-map";
+	u32 fuse_sel[4];
+	char *prop_str;
 
-	/* Specify default no match case. */
-	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
-	mem_acc_vreg->override_map_count = 0;
-
-	if (!of_find_property(of_node, prop_str, &len)) {
-		/* No mapping present. */
-		return 0;
+	prop_str = "qcom,override-acc-fuse-sel";
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					FUSE_TUPLE_SIZE);
+	if (rc < 0) {
+		pr_err("Read failed - %s rc=%d\n", prop_str, rc);
+		return rc;
 	}
 
+	mem_acc_read_efuse_param(mem_acc_vreg, fuse_sel,
+				 &mem_acc_vreg->override_fuse_value);
+
+	prop_str = "qcom,override-fuse-version-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		return -EINVAL;
+
 	tuple_size = 1;
 	mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
-
 	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
 		pr_err("%s length=%d is invalid\n", prop_str, len);
 		return -EINVAL;
@@ -695,8 +717,9 @@
 	}
 
 	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
-		pr_debug("%s tuple match found: %d\n", prop_str,
-				mem_acc_vreg->override_map_match);
+		pr_info("override_fuse_val=%d, %s tuple match found: %d\n",
+			mem_acc_vreg->override_fuse_value, prop_str,
+			mem_acc_vreg->override_map_match);
 	else
 		pr_err("%s tuple match not found\n", prop_str);
 
@@ -705,6 +728,121 @@
 	return rc;
 }
 
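+/*
+ * mem_acc_parse_override_fuse_version_range() - pick an override tuple
+ * based on fuse value ranges.
+ *
+ * Sketch of the logic below: decode every fuse described by
+ * "qcom,override-acc-range-fuse-list" (FUSE_TUPLE_SIZE cells per fuse,
+ * interpreted by mem_acc_read_efuse_param()), then walk
+ * "qcom,override-fuse-range-map", where each row carries one [min, max]
+ * pair per fuse. The first row whose ranges all contain the decoded
+ * fuse values is recorded in override_map_match.
+ */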
+static int mem_acc_parse_override_fuse_version_range(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, j, rc, size, row_size;
+	int num_fuse_sel, len = 0;
+	u32 *tmp = NULL;
+	char *prop_str;
+	u32 *fuse_val, *fuse_sel;
+	char *buf = NULL;
+	int pos = 0, buflen;
+
+	prop_str = "qcom,override-acc-range-fuse-list";
+	if (!of_find_property(of_node, prop_str, &len)) {
+		pr_err("%s property is missing\n", prop_str);
+		return -EINVAL;
+	}
+
+	size = len / sizeof(u32);
+	if (len == 0 || (size % FUSE_TUPLE_SIZE)) {
+		pr_err("%s property length (%d) is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	num_fuse_sel = size / FUSE_TUPLE_SIZE;
+	fuse_val = devm_kcalloc(mem_acc_vreg->dev, num_fuse_sel,
+				sizeof(*fuse_val), GFP_KERNEL);
+	if (!fuse_val)
+		return -ENOMEM;
+	mem_acc_vreg->override_acc_range_fuse_list = fuse_val;
+	mem_acc_vreg->override_acc_range_fuse_num = num_fuse_sel;
+
+	fuse_sel = kzalloc(len, GFP_KERNEL);
+	if (!fuse_sel) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					size);
+	if (rc) {
+		pr_err("%s read failed, rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < num_fuse_sel; i++) {
+		mem_acc_read_efuse_param(mem_acc_vreg, &fuse_sel[i * 4],
+					 &fuse_val[i]);
+	}
+
+	prop_str = "qcom,override-fuse-range-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		goto done;
+
+	row_size = num_fuse_sel * 2;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * row_size);
+
+	if (len == 0 || len % (sizeof(u32) * row_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+				mem_acc_vreg->override_map_count * row_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		for (j = 0; j < num_fuse_sel; j++) {
+			if (tmp[i * row_size + j * 2] > fuse_val[j]
+				|| tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+				break;
+		}
+
+		if (j == num_fuse_sel) {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	/*
+	 * Log register and value mapping since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = num_fuse_sel * sizeof("fuse_selxxxx = XXXX ");
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		goto done;
+
+	for (j = 0; j < num_fuse_sel; j++)
+		pos += scnprintf(buf + pos, buflen - pos, "fuse_sel%d = %d ",
+				 j, fuse_val[j]);
+	buf[pos] = '\0';
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_info("%s %s tuple match found: %d\n", buf, prop_str,
+			mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s %s tuple match not found\n", buf, prop_str);
+
+done:
+	kfree(fuse_sel);
+	kfree(tmp);
+	kfree(buf);
+	return rc;
+}
+
 #define MAX_CHARS_PER_INT	20
 
 static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
@@ -789,6 +927,150 @@
 	return rc;
 }
 
+static int mem_acc_override_reg_addr_val_init(
+			struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *override_reg_config_list;
+	int i, tuple_count, tuple_match, len = 0, rc = 0;
+	u32 list_size, override_max_reg_config_len;
+	char prop_str[40];
+	struct property *prop;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	if (!mem_acc_vreg->corner_acc_reg_config)
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+			 "qcom,override-corner%d-addr-val-map", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		list_size = len / (tuple_count * sizeof(u32));
+		if (!prop) {
+			pr_debug("%s property not specified\n", prop_str);
+			continue;
+		}
+
+		if ((!list_size) || list_size < (num_corners * 2)) {
+			pr_err("qcom,override-corner%d-addr-val-map property is missing or has an invalid length: len=%d\n",
+			i, len);
+			return -EINVAL;
+		}
+
+		override_max_reg_config_len = list_size / (num_corners * 2);
+		override_reg_config_list =
+				corner_acc_reg_config[i].reg_config_list;
+
+		if (corner_acc_reg_config[i].max_reg_config_len
+					!= override_max_reg_config_len) {
+			/* Free previously allocated memory */
+			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+			/* Allocate memory for the new requirement */
+			override_reg_config_list =
+				devm_kcalloc(mem_acc_vreg->dev,
+				override_max_reg_config_len * num_corners,
+				sizeof(*override_reg_config_list), GFP_KERNEL);
+			if (!override_reg_config_list)
+				return -ENOMEM;
+
+			corner_acc_reg_config[i].max_reg_config_len =
+						override_max_reg_config_len;
+			corner_acc_reg_config[i].reg_config_list =
+						override_reg_config_list;
+		}
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+					override_reg_config_list, tuple_match,
+					list_size, mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value pairs for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int mem_acc_parse_override_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, rc = 0;
+
+	/* Specify default no match case. */
+	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+	mem_acc_vreg->override_map_count = 0;
+
+	if (of_find_property(of_node, "qcom,override-fuse-range-map",
+			     NULL)) {
+		rc = mem_acc_parse_override_fuse_version_range(mem_acc_vreg);
+		if (rc) {
+			pr_err("parsing qcom,override-fuse-range-map property failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else if (of_find_property(of_node, "qcom,override-fuse-version-map",
+				    NULL)) {
+		rc = mem_acc_parse_override_fuse_version_map(mem_acc_vreg);
+		if (rc) {
+			pr_err("parsing qcom,override-fuse-version-map property failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		/* No override fuse configuration defined in device node */
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+		return 0;
+
+	rc = mem_acc_override_corner_map(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to override corner map rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to override reg_config_list init rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		rc = override_mem_acc_custom_data(mem_acc_vreg, i);
+		if (rc) {
+			pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
 static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
 {
 	struct device_node *of_node = mem_acc_vreg->dev->of_node;
@@ -965,92 +1247,6 @@
 	return rc;
 }
 
-static int mem_acc_override_reg_addr_val_init(
-			struct mem_acc_regulator *mem_acc_vreg)
-{
-	struct device_node *of_node = mem_acc_vreg->dev->of_node;
-	struct corner_acc_reg_config *corner_acc_reg_config;
-	struct acc_reg_value *override_reg_config_list;
-	int i, tuple_count, tuple_match, len = 0, rc = 0;
-	u32 list_size, override_max_reg_config_len;
-	char prop_str[40];
-	struct property *prop;
-	int num_corners = mem_acc_vreg->num_corners;
-
-	if (!mem_acc_vreg->corner_acc_reg_config)
-		return 0;
-
-	if (mem_acc_vreg->override_map_count) {
-		if (mem_acc_vreg->override_map_match ==	FUSE_MAP_NO_MATCH)
-			return 0;
-		tuple_count = mem_acc_vreg->override_map_count;
-		tuple_match = mem_acc_vreg->override_map_match;
-	} else {
-		tuple_count = 1;
-		tuple_match = 0;
-	}
-
-	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
-	for (i = 1; i <= num_corners; i++) {
-		snprintf(prop_str, sizeof(prop_str),
-				"qcom,override-corner%d-addr-val-map", i);
-		prop = of_find_property(of_node, prop_str, &len);
-		list_size = len / (tuple_count * sizeof(u32));
-		if (!prop) {
-			pr_debug("%s property not specified\n", prop_str);
-			continue;
-		}
-
-		if ((!list_size) || list_size < (num_corners * 2)) {
-			pr_err("qcom,override-corner%d-addr-val-map property is missed or invalid length: len=%d\n",
-			i, len);
-			return -EINVAL;
-		}
-
-		override_max_reg_config_len = list_size / (num_corners * 2);
-		override_reg_config_list =
-				corner_acc_reg_config[i].reg_config_list;
-
-		if (corner_acc_reg_config[i].max_reg_config_len
-					!= override_max_reg_config_len) {
-			/* Free already allocate memory */
-			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
-
-			/* Allocated memory for new requirement */
-			override_reg_config_list =
-				devm_kcalloc(mem_acc_vreg->dev,
-				override_max_reg_config_len * num_corners,
-				sizeof(*override_reg_config_list), GFP_KERNEL);
-			if (!override_reg_config_list)
-				return -ENOMEM;
-
-			corner_acc_reg_config[i].max_reg_config_len =
-						override_max_reg_config_len;
-			corner_acc_reg_config[i].reg_config_list =
-						override_reg_config_list;
-		}
-
-		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
-					override_reg_config_list, tuple_match,
-					list_size, mem_acc_vreg->num_acc_reg);
-		if (rc) {
-			pr_err("Failed to read %s property: rc=%d\n",
-				prop_str, rc);
-			return rc;
-		}
-
-		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
-						&corner_acc_reg_config[i], i);
-		if (rc) {
-			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
-				i, rc);
-			return rc;
-		}
-	}
-
-	return rc;
-}
-
 #define MEM_TYPE_STRING_LEN	20
 static int mem_acc_init(struct platform_device *pdev,
 		struct mem_acc_regulator *mem_acc_vreg)
@@ -1058,8 +1254,6 @@
 	struct device_node *of_node = pdev->dev.of_node;
 	struct resource *res;
 	int len, rc, i, j;
-	u32 fuse_sel[4];
-	u64 fuse_bits;
 	bool acc_type_present = false;
 	char tmps[MEM_TYPE_STRING_LEN];
 
@@ -1201,59 +1395,12 @@
 		}
 	}
 
-	if (of_find_property(mem_acc_vreg->dev->of_node,
-				"qcom,override-acc-fuse-sel", NULL)) {
-		rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
-			"qcom,override-acc-fuse-sel", fuse_sel, 4);
-		if (rc < 0) {
-			pr_err("Read failed - qcom,override-acc-fuse-sel rc=%d\n",
-					rc);
-			return rc;
-		}
-
-		fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
-								fuse_sel[3]);
-		/*
-		 * fuse_sel[1] = LSB position in row (shift)
-		 * fuse_sel[2] = num of bits (mask)
-		 */
-		mem_acc_vreg->override_fuse_value = (fuse_bits >> fuse_sel[1]) &
-						((1 << fuse_sel[2]) - 1);
-
-		rc = mem_acc_find_override_map_match(pdev, mem_acc_vreg);
-		if (rc) {
-			pr_err("Unable to find fuse map match rc=%d\n", rc);
-			return rc;
-		}
-
-		pr_debug("override_fuse_val=%d override_map_match=%d\n",
-					mem_acc_vreg->override_fuse_value,
-					mem_acc_vreg->override_map_match);
-
-		rc = mem_acc_override_corner_map(mem_acc_vreg);
-		if (rc) {
-			pr_err("Unable to override corner map rc=%d\n", rc);
-			return rc;
-		}
-
-		rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
-		if (rc) {
-			pr_err("Unable to override reg_config_list init rc=%d\n",
-				rc);
-			return rc;
-		}
-
-		for (i = 0; i < MEMORY_MAX; i++) {
-			rc = override_mem_acc_custom_data(pdev,
-							mem_acc_vreg, i);
-			if (rc) {
-				pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
-					i, rc);
-				return rc;
-			}
-		}
+	rc = mem_acc_parse_override_config(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to parse mem acc override configuration, rc=%d\n",
+			rc);
+		return rc;
 	}
-
 	if (acc_type_present) {
 		mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
 			mem_acc_vreg->dev, mem_acc_vreg->num_corners *
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bcc8f3d..b3f9243 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -358,6 +358,8 @@
 
 	adapter->next_port_scan = jiffies;
 
+	adapter->erp_action.adapter = adapter;
+
 	if (zfcp_qdio_setup(adapter))
 		goto failed;
 
@@ -514,6 +516,9 @@
 	port->dev.groups = zfcp_port_attr_groups;
 	port->dev.release = zfcp_port_release;
 
+	port->erp_action.adapter = adapter;
+	port->erp_action.port = port;
+
 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
 		kfree(port);
 		goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7ccfce5..3b23d675 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
 				&zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-		erp_action->port = port;
-		erp_action->sdev = sdev;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != sdev);
 		if (!(atomic_read(&zfcp_sdev->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@
 		zfcp_erp_action_dismiss_port(port);
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-		erp_action->port = port;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
 		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
@@ -219,7 +218,8 @@
 		zfcp_erp_action_dismiss_adapter(adapter);
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+		WARN_ON_ONCE(erp_action->port != NULL);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@
 		return NULL;
 	}
 
-	erp_action->adapter = adapter;
+	WARN_ON_ONCE(erp_action->adapter != adapter);
+	memset(&erp_action->list, 0, sizeof(erp_action->list));
+	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+	erp_action->fsf_req_id = 0;
 	erp_action->action = need;
 	erp_action->status = act_status;
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 9bd9b9a..a9b8104 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@
 	struct zfcp_unit *unit;
 	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
+	zfcp_sdev->erp_action.adapter = adapter;
+	zfcp_sdev->erp_action.sdev = sdev;
+
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (!port)
 		return -ENXIO;
 
+	zfcp_sdev->erp_action.port = port;
+
 	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
 	if (unit)
 		put_device(&unit->dev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0114e2a..9965135 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -841,7 +841,7 @@
 
 	val = 0;
 	list_for_each_entry(srp, &sfp->rq_list, entry) {
-		if (val > SG_MAX_QUEUE)
+		if (val >= SG_MAX_QUEUE)
 			break;
 		rinfo[val].req_state = srp->done + 1;
 		rinfo[val].problem =
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 14f9dea..7d629b4 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1215,7 +1215,7 @@
 			goto qspi_probe_err;
 		}
 	} else {
-		goto qspi_probe_err;
+		goto qspi_resource_err;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1237,7 +1237,7 @@
 		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qspi->base[CHIP_SELECT])) {
 			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
-			goto qspi_probe_err;
+			goto qspi_resource_err;
 		}
 	}
 
@@ -1245,7 +1245,7 @@
 				GFP_KERNEL);
 	if (!qspi->dev_ids) {
 		ret = -ENOMEM;
-		goto qspi_probe_err;
+		goto qspi_resource_err;
 	}
 
 	for (val = 0; val < num_irqs; val++) {
@@ -1334,8 +1334,9 @@
 	bcm_qspi_hw_uninit(qspi);
 	clk_disable_unprepare(qspi->clk);
 qspi_probe_err:
-	spi_master_put(master);
 	kfree(qspi->dev_ids);
+qspi_resource_err:
+	spi_master_put(master);
 	return ret;
 }
 /* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 4ddb332..a5e050a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1218,7 +1218,8 @@
 				 &dwc->fladj);
 	dwc->disable_clk_gating = device_property_read_bool(dev,
 				"snps,disable-clk-gating");
-
+	dwc->enable_bus_suspend = device_property_read_bool(dev,
+					"snps,bus-suspend-enable");
 	if (dwc->enable_bus_suspend) {
 		pm_runtime_set_autosuspend_delay(dev, 500);
 		pm_runtime_use_autosuspend(dev);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 19c79f2..09daf2e 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3805,8 +3805,7 @@
 		dwc3_usb3_phy_suspend(dwc, false);
 		mdwc->in_host_mode = false;
 
-		pm_runtime_mark_last_busy(mdwc->dev);
-		pm_runtime_put_sync_autosuspend(mdwc->dev);
+		pm_runtime_put_sync_suspend(mdwc->dev);
 		dbg_event(0xFF, "StopHost psync",
 			atomic_read(&mdwc->dev->power.usage_count));
 	}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 667e596..fabb326 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -415,25 +415,25 @@
 						     GFP_NOWAIT);
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				xhci_free_command(xhci, cmd);
-				return -ENOMEM;
-
+				ret = -ENOMEM;
+				goto cmd_cleanup;
 			}
 
 			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
-					i, suspend);
+						       i, suspend);
 			if (ret) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, command);
-				goto err_cmd_queue;
+				goto cmd_cleanup;
 			}
 		}
 	}
 	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		goto err_cmd_queue;
+		goto cmd_cleanup;
 	}
+
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -445,7 +445,7 @@
 		ret = -ETIME;
 	}
 
-err_cmd_queue:
+cmd_cleanup:
 	xhci_free_command(xhci, cmd);
 	return ret;
 }
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ef2b61..79b8ab4 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1030,6 +1030,7 @@
 	mutex_unlock(&priv->lock);
 
 	if (use_ptemod) {
+		map->pages_vm_start = vma->vm_start;
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
 					  vma->vm_end - vma->vm_start,
 					  find_grant_ptes, map);
@@ -1067,7 +1068,6 @@
 					    set_grant_ptes_as_special, NULL);
 		}
 #endif
-		map->pages_vm_start = vma->vm_start;
 	}
 
 	return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 03951f9..3e1c136 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1900,6 +1900,7 @@
 retry:
 	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+		spin_unlock(&ci->i_ceph_lock);
 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
 		goto out;
 	}
@@ -1917,8 +1918,10 @@
 			mutex_lock(&session->s_mutex);
 			goto retry;
 		}
-		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+			spin_unlock(&ci->i_ceph_lock);
 			goto out;
+		}
 
 		flushing = __mark_caps_flushing(inode, session, true,
 						&flush_tid, &oldest_flush_tid);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 599a292..a896e46 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@
 static inline struct ecryptfs_auth_tok *
 ecryptfs_get_encrypted_key_payload_data(struct key *key)
 {
-	if (key->type == &key_type_encrypted)
-		return (struct ecryptfs_auth_tok *)
-			(&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
-	else
+	struct encrypted_key_payload *payload;
+
+	if (key->type != &key_type_encrypted)
 		return NULL;
+
+	payload = key->payload.data[0];
+	if (!payload)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)payload->payload_data;
 }
 
 static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@
 ecryptfs_get_key_payload_data(struct key *key)
 {
 	struct ecryptfs_auth_tok *auth_tok;
+	const struct user_key_payload *ukp;
 
 	auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
-	if (!auth_tok)
-		return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
-	else
+	if (auth_tok)
 		return auth_tok;
+
+	ukp = user_key_payload(key);
+	if (!ukp)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)ukp->data;
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546..fa218cd 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@
  * @auth_tok_key: key containing the authentication token
  * @auth_tok: authentication token
  *
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
  */
 static int
 ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@
 	int rc = 0;
 
 	(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+	if (IS_ERR(*auth_tok)) {
+		rc = PTR_ERR(*auth_tok);
+		*auth_tok = NULL;
+		goto out;
+	}
+
 	if (ecryptfs_verify_version((*auth_tok)->version)) {
 		printk(KERN_ERR "Data structure version mismatch. Userspace "
 		       "tools must match eCryptfs kernel module with major "
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c7c3c96..1693308 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1361,7 +1361,8 @@
 			*/
 			over = !dir_emit(ctx, dirent->name, dirent->namelen,
 				       dirent->ino, dirent->type);
-			ctx->pos = dirent->off;
+			if (!over)
+				ctx->pos = dirent->off;
 		}
 
 		buf += reclen;
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index 1ba819b..1917c0d 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -47,6 +47,12 @@
 #define QMI_IPA_MAX_FILTERS_EX_V01 128
 #define QMI_IPA_MAX_PIPES_V01 20
 #define QMI_IPA_MAX_APN_V01 8
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+/* Currently only one destination pipe is used, but the maximum is
+ * kept at 8 for scalability.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
 
 #define IPA_INT_MAX	((int)(~0U>>1))
 #define IPA_INT_MIN	(-IPA_INT_MAX - 1)
@@ -989,6 +995,16 @@
 	 *	failure, the Rule Ids in this list must be set to a reserved
 	 *	index (255).
 	 */
+
+	/* Optional */
+	/*	List of destination pipe IDs. */
+	uint8_t dst_pipe_id_valid;
+	/* Must be set to true if dst_pipe_id is being passed. */
+	uint32_t dst_pipe_id_len;
+	/* Must be set to # of elements in dst_pipe_id. */
+	uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+	/* Provides the list of destination pipe IDs for a source pipe. */
+
 };  /* Message */
 
 /* Response Message; This is the message that is exchanged between the
@@ -1626,6 +1642,273 @@
 	 */
 };  /* Message */
 
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+	/* Mandatory */
+	/* Collect statistics per client; */
+	uint8_t enable_per_client_stats;
+	/*
+	 * Indicates whether to start or stop collecting
+	 * per client statistics.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+	uint32_t client_id;
+	/*
+	 * Id of the client on APPS processor side for which Modem processor
+	 * needs to send uplink/downlink statistics.
+	 */
+
+	uint32_t src_pipe_id;
+	/*
+	 * IPA consumer pipe on which client on APPS side sent uplink
+	 * data to modem.
+	 */
+
+	uint64_t num_ul_ipv4_bytes;
+	/*
+	 * Accumulated number of uplink IPv4 bytes for a client.
+	 */
+
+	uint64_t num_ul_ipv6_bytes;
+	/*
+	 * Accumulated number of uplink IPv6 bytes for a client.
+	 */
+
+	uint64_t num_dl_ipv4_bytes;
+	/*
+	 * Accumulated number of downlink IPv4 bytes for a client.
+	 */
+
+	uint64_t num_dl_ipv6_bytes;
+	/*
+	 * Accumulated number of downlink IPv6 bytes for a client.
+	 */
+
+
+	uint32_t num_ul_ipv4_pkts;
+	/*
+	 * Accumulated number of uplink IPv4 packets for a client.
+	 */
+
+	uint32_t num_ul_ipv6_pkts;
+	/*
+	 * Accumulated number of uplink IPv6 packets for a client.
+	 */
+
+	uint32_t num_dl_ipv4_pkts;
+	/*
+	 * Accumulated number of downlink IPv4 packets for a client.
+	 */
+
+	uint32_t num_dl_ipv6_pkts;
+	/*
+	 * Accumulated number of downlink IPv6 packets for a client.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+	/* Mandatory */
+	/*  Client id */
+	uint32_t client_id;
+	/*
+	 * Id of the client on APPS processor side for which Modem processor
+	 * needs to send uplink/downlink statistics. If the client ID is
+	 * specified as 0xffffffff, Q6 sends the stats for all the clients of
+	 * the specified source pipe.
+	 */
+
+	/* Mandatory */
+	/*  Source pipe id */
+	uint32_t src_pipe_id;
+	/*
+	 * IPA consumer pipe on which client on APPS side sent uplink
+	 * data to modem. In future, this implementation can be extended
+	 * to provide 0xffffffff as the source pipe id, where Q6 will send
+	 * the stats of all the clients across all different tethered-pipes.
+	 */
+
+	/* Optional */
+	/*  Reset client statistics. */
+	uint8_t reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed. */
+	uint8_t reset_stats;
+	/*
+	 * Option to reset the statistics currently collected by modem for this
+	 * particular client.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Per clients Statistics List */
+	uint8_t per_client_stats_list_valid;
+	/* Must be set to true if per_client_stats_list is being passed. */
+	uint32_t per_client_stats_list_len;
+	/* Must be set to # of elements in per_client_stats_list. */
+	struct ipa_per_client_stats_info_type_v01
+		per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+	/*
+	 * List of all per client statistics that are retrieved.
+	 */
+};  /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*
+	 * IP type for which this rule is applicable.
+	 * The driver must identify the filter table (v6 or v4), and this
+	 * field is essential for that. Values:
+	 * - QMI_IPA_IP_TYPE_INVALID (0) --  Invalid IP type identifier
+	 * - QMI_IPA_IP_TYPE_V4 (1) --  IPv4 type
+	 * - QMI_IPA_IP_TYPE_V6 (2) --  IPv6 type
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*
+	 * Rules in the filter specification. These rules are the
+	 * ones that are matched against fields in the packet.
+	 * Currently we only send IPv6 whitelist rules to Q6.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+	/* Optional */
+	/*  Uplink Firewall Specification  */
+	uint32_t firewall_rules_list_len;
+	/* Must be set to # of elements in firewall_rules_list. */
+	struct ipa_ul_firewall_rule_type_v01
+		firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+	/*
+	 * List of uplink firewall specifications of filters that must be
+	 * installed.
+	 */
+
+	uint32_t mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+
+	/* Optional */
+	uint8_t disable_valid;
+	/* Must be set to true if disable is being passed. */
+	uint8_t disable;
+	/*
+	 * Indicates whether uplink firewall needs to be enabled or disabled.
+	 */
+
+	/* Optional */
+	uint8_t are_blacklist_filters_valid;
+	/* Must be set to true if are_blacklist_filters is being passed. */
+	uint8_t are_blacklist_filters;
+	/*
+	 * Indicates whether the filters received as part of this message are
+	 * blacklist filters, i.e. drop uplink packets matching these rules.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*
+	 * Standard response type. Contains the following data members:
+	 * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * qmi_error_type  -- Error code. Possible error code values are
+	 * described in the error codes section of each message definition.
+	 */
+};  /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+	IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32-bit signed enum. Do not change or use. */
+	QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+	/* Indicates that the uplink firewall rules
+	 * are configured successfully.
+	 */
+	QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+	/* Indicates that the uplink firewall rules
+	 * are not configured successfully.
+	 */
+	IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32-bit signed enum. Do not change or use. */
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+	enum ipa_ul_firewall_status_enum_v01 is_success;
+	/*
+	 * Indicates whether the uplink firewall rules are configured
+	 * successfully.
+	 */
+
+	uint32_t mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+};
+
+/*
+ * Indication Message; Reports the result of installing the uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+
+	struct ipa_ul_firewall_config_result_type_v01 result;
+};  /* Message */
+
 /*Service Message Definition*/
 #define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
 #define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -1659,6 +1942,13 @@
 #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
 
 /* add for max length*/
 #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
@@ -1667,7 +1957,7 @@
 #define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
 #define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
 #define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
-#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
 #define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
 #define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
 #define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
@@ -1700,6 +1990,15 @@
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
 
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
+
 /* Service Object Accessor */
 
 #endif/* IPA_QMI_SERVICE_V01_H */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 97eca0a..d3b9a33 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -127,6 +127,17 @@
 #define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
 
 /**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
  * the attributes of the rule (routing or filtering)
  */
 #define IPA_FLT_TOS			(1ul << 0)
@@ -470,6 +481,7 @@
 	IPA_TETHERING_STATS_EVENT_MAX,
 };
 
+
 enum ipa_quota_event {
 	IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX,
 	IPA_QUOTA_EVENT_MAX,
@@ -489,7 +501,13 @@
 	IPA_VLAN_L2TP_EVENT_MAX,
 };
 
-#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+enum ipa_per_client_stats_event {
+	IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+	IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+	IPA_PER_CLIENT_STATS_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_PER_CLIENT_STATS_EVENT_MAX)
 #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
 
 /**
@@ -1175,6 +1193,48 @@
 };
 
 /**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext - routing rule addition parameters (supports
+ * multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ *  no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add_ext rules[0];
+};
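+
+/*
+ * Illustrative note (not part of the ABI itself): rules[] is a flexible
+ * array, so callers are expected to allocate
+ *	sizeof(struct ipa_ioc_add_rt_rule_ext) +
+ *		num_rules * sizeof(struct ipa_rt_rule_add_ext)
+ * bytes and lay the rules out back to back, as described above.
+ */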
+
+/**
  * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
  * multiple headers and commit)
  * @commit: should rules be removed from IPA HW also?
@@ -1733,6 +1793,52 @@
 	IPACM_CLIENT_WLAN,
 	IPACM_CLIENT_MAX
 };
+
+enum ipacm_per_client_device_type {
+	IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+	IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+	IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+
+/**
+ * struct ipa_lan_client_msg - LAN client information message
+ * @lanIface: Name of the LAN interface
+ * @mac: MAC address of the client.
+ */
+struct ipa_lan_client_msg {
+	char lanIface[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	int8_t client_idx;
+	uint8_t inited;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ * @lan_client: Per-client info for the connected clients.
+ */
+struct ipa_tether_device_info {
+	int32_t ul_src_pipe;
+	uint8_t hdr_len;
+	uint32_t num_clients;
+	struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
 /**
  *   actual IOCTLs supported by IPA driver
  */
@@ -1745,6 +1851,9 @@
 #define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE, \
 					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_EXT, \
+					struct ipa_ioc_add_rt_rule_ext *)
 #define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
 					IPA_IOCTL_ADD_RT_RULE_AFTER, \
 					struct ipa_ioc_add_rt_rule_after *)
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index 04aaaad..2992e2c 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -34,6 +34,12 @@
 #define WAN_IOCTL_ADD_FLT_RULE_EX        9
 #define WAN_IOCTL_QUERY_TETHER_STATS_ALL  10
 #define WAN_IOCTL_NOTIFY_WAN_STATE  11
+#define WAN_IOCTL_ADD_UL_FLT_RULE          12
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS    13
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS     14
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO        15
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO      16
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG        17
 
 /* User space may not have this defined. */
 #ifndef IFNAMSIZ
@@ -130,6 +136,56 @@
 struct wan_ioctl_notify_wan_state {
 	uint8_t up;
 };
+
+struct wan_ioctl_send_lan_client_msg {
+	/* Lan client info. */
+	struct ipa_lan_client_msg lan_client;
+	/* Event to indicate whether client is
+	 * connected or disconnected.
+	 */
+	enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Init client. */
+	uint8_t client_init;
+	/* Client Index */
+	int8_t client_idx;
+	/* Header length of the client. */
+	uint8_t hdr_len;
+	/* Source pipe of the lan client. */
+	enum ipa_client_type ul_src_pipe;
+};
+
+struct wan_ioctl_per_client_info {
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Ipv4 UL traffic bytes. */
+	uint64_t ipv4_tx_bytes;
+	/* Ipv4 DL traffic bytes. */
+	uint64_t ipv4_rx_bytes;
+	/* Ipv6 UL traffic bytes. */
+	uint64_t ipv6_tx_bytes;
+	/* Ipv6 DL traffic bytes. */
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* Indicates whether to reset the stats (use 1) or not. */
+	uint8_t reset_stats;
+	/* Indicates whether client is disconnected. */
+	uint8_t disconnect_clnt;
+	/* Number of clients. */
+	uint8_t num_clients;
+	/* Client information. */
+	struct wan_ioctl_per_client_info
+		client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
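+
+/*
+ * Illustrative user-space sketch (not part of the UAPI; the device node
+ * path below is an assumption and may differ per target):
+ *
+ *	struct wan_ioctl_query_per_client_stats stats = {
+ *		.device_type = IPACM_CLIENT_DEVICE_TYPE_WLAN,
+ *		.num_clients = IPA_MAX_NUM_HW_PATH_CLIENTS,
+ *	};
+ *	int fd = open("/dev/wwan_ioctl", O_RDWR);
+ *
+ *	if (fd >= 0 &&
+ *	    ioctl(fd, WAN_IOC_QUERY_PER_CLIENT_STATS, &stats) == 0) {
+ *		... stats.client_info[] now holds per-client byte counts ...
+ *	}
+ */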
 
 #define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
 		WAN_IOCTL_ADD_FLT_RULE, \
@@ -179,4 +235,27 @@
 		WAN_IOCTL_NOTIFY_WAN_STATE, \
 		struct wan_ioctl_notify_wan_state *)
 
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_UL_FLT_RULE, \
+		struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+		bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+		struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+				WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+				struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
 #endif /* _RMNET_IPA_FD_IOCTL_H */
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e..856de39 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
 #define SPIDEV_H
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 /* User space versions of kernel symbols for SPI clocking modes,
  * matching <linux/spi/spi.h>
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b5a271b..9801487 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1810,7 +1810,7 @@
 cpu_util_freq_pelt(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long util = rq->cfs.avg.util_avg;
+	u64 util = rq->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
 	util *= (100 + per_cpu(sched_load_boost, cpu));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0e5e54f..3630826 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -70,6 +70,7 @@
 	 * attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
+	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
 
 	/* worker flags */
@@ -167,7 +168,6 @@
 						/* L: hash of busy workers */
 
 	/* see manage_workers() for details on the two manager mutexes */
-	struct mutex		manager_arb;	/* manager arbitration */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
@@ -299,6 +299,7 @@
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-	bool managing = mutex_is_locked(&pool->manager_arb);
+	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1985,24 +1986,17 @@
 {
 	struct worker_pool *pool = worker->pool;
 
-	/*
-	 * Anyone who successfully grabs manager_arb wins the arbitration
-	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
-	 * failure while holding pool->lock reliably indicates that someone
-	 * else is managing the pool and the worker which failed trylock
-	 * can proceed to executing work items.  This means that anyone
-	 * grabbing manager_arb is responsible for actually performing
-	 * manager duties.  If manager_arb is grabbed and released without
-	 * actual management, the pool may stall indefinitely.
-	 */
-	if (!mutex_trylock(&pool->manager_arb))
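+	/*
+	 * Managership is now arbitrated by the POOL_MANAGER_ACTIVE flag,
+	 * checked and set under pool->lock instead of a dedicated mutex.
+	 * Whoever sets the flag performs the manager duties; the flag is
+	 * cleared and wq_manager_wait is woken once the duties are done,
+	 * so that put_unbound_pool() can wait for the last manager to
+	 * finish before tearing the pool down.
+	 */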
+	if (pool->flags & POOL_MANAGER_ACTIVE)
 		return false;
+
+	pool->flags |= POOL_MANAGER_ACTIVE;
 	pool->manager = worker;
 
 	maybe_create_worker(pool);
 
 	pool->manager = NULL;
-	mutex_unlock(&pool->manager_arb);
+	pool->flags &= ~POOL_MANAGER_ACTIVE;
+	wake_up(&wq_manager_wait);
 	return true;
 }
 
@@ -3210,7 +3204,6 @@
 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 		    (unsigned long)pool);
 
-	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
@@ -3280,13 +3273,15 @@
 	hash_del(&pool->hash_node);
 
 	/*
-	 * Become the manager and destroy all workers.  Grabbing
-	 * manager_arb prevents @pool's workers from blocking on
-	 * attach_mutex.
+	 * Become the manager and destroy all workers.  This prevents
+	 * @pool's workers from blocking on attach_mutex.  We're the last
+	 * manager and @pool gets freed with the flag set.
 	 */
-	mutex_lock(&pool->manager_arb);
-
 	spin_lock_irq(&pool->lock);
+	wait_event_lock_irq(wq_manager_wait,
+			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	pool->flags |= POOL_MANAGER_ACTIVE;
+
 	while ((worker = first_idle_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3300,8 +3295,6 @@
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
 
-	mutex_unlock(&pool->manager_arb);
-
 	/* shut down the timers */
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
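The workqueue hunk above replaces the per-pool manager_arb mutex with a POOL_MANAGER_ACTIVE flag tested under pool->lock, plus the global wq_manager_wait queue that put_unbound_pool() sleeps on until the last manager clears the flag. Below is a minimal userspace sketch of the same arbitration pattern, with a pthread mutex/condvar standing in for pool->lock and wq_manager_wait; pool_manage()/pool_destroy() are illustrative names, not the kernel API.

#include <pthread.h>
#include <stdbool.h>

/* Userspace analogue: the mutex plays pool->lock, the condvar plays
 * wq_manager_wait, the bool plays POOL_MANAGER_ACTIVE.
 */
struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  manager_wait;
	bool            manager_active;
};

/* A worker volunteers to manage the pool; returns false if someone else
 * already holds the role (the caller simply keeps processing work items).
 */
static bool pool_manage(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (p->manager_active) {
		pthread_mutex_unlock(&p->lock);
		return false;
	}
	p->manager_active = true;
	pthread_mutex_unlock(&p->lock);

	/* ... create or cull workers here, outside the lock ... */

	pthread_mutex_lock(&p->lock);
	p->manager_active = false;
	pthread_cond_broadcast(&p->manager_wait);	/* wake_up(&wq_manager_wait) */
	pthread_mutex_unlock(&p->lock);
	return true;
}

/* Tear-down waits until no manager is active, then claims the flag itself
 * so no new manager can start while the workers are destroyed.
 */
static void pool_destroy(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->manager_active)			/* wait_event_lock_irq() */
		pthread_cond_wait(&p->manager_wait, &p->lock);
	p->manager_active = true;
	/* ... destroy idle workers under the lock ... */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.manager_wait = PTHREAD_COND_INITIALIZER,
		.manager_active = false,
	};

	pool_manage(&p);	/* first caller becomes the manager */
	pool_destroy(&p);
	return 0;
}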
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0..5cd0935 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@
 		if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
 			goto all_leaves_cluster_together;
 
-		/* Otherwise we can just insert a new node ahead of the old
-		 * one.
+		/* Otherwise all the old leaves cluster in the same slot, but
+		 * the new leaf wants to go into a different slot - so we
+		 * create a new node (n0) to hold the new leaf and a pointer to
+		 * a new node (n1) holding all the old leaves.
+		 *
+		 * This can be done by falling through to the node splitting
+		 * path.
 		 */
-		goto present_leaves_cluster_but_not_new_leaf;
+		pr_devel("present leaves cluster but not new leaf\n");
 	}
 
 split_node:
 	pr_devel("split node\n");
 
-	/* We need to split the current node; we know that the node doesn't
-	 * simply contain a full set of leaves that cluster together (it
-	 * contains meta pointers and/or non-clustering leaves).
+	/* We need to split the current node.  The node must contain anything
+	 * from a single leaf (in the one leaf case, this leaf will cluster
+	 * with the new leaf) and the rest meta-pointers, to all leaves, some
+	 * of which may cluster.
+	 *
+	 * It won't contain the case in which all the current leaves plus the
+	 * new leaves want to cluster in the same slot.
 	 *
 	 * We need to expel at least two leaves out of a set consisting of the
-	 * leaves in the node and the new leaf.
+	 * leaves in the node and the new leaf.  The current meta pointers can
+	 * just be copied as they shouldn't cluster with any of the leaves.
 	 *
 	 * We need a new node (n0) to replace the current one and a new node to
 	 * take the expelled nodes (n1).
@@ -717,33 +727,6 @@
 	pr_devel("<--%s() = ok [split node]\n", __func__);
 	return true;
 
-present_leaves_cluster_but_not_new_leaf:
-	/* All the old leaves cluster in the same slot, but the new leaf wants
-	 * to go into a different slot, so we create a new node to hold the new
-	 * leaf and a pointer to a new node holding all the old leaves.
-	 */
-	pr_devel("present leaves cluster but not new leaf\n");
-
-	new_n0->back_pointer = node->back_pointer;
-	new_n0->parent_slot = node->parent_slot;
-	new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
-	new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
-	new_n1->parent_slot = edit->segment_cache[0];
-	new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
-	edit->adjust_count_on = new_n0;
-
-	for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
-		new_n1->slots[i] = node->slots[i];
-
-	new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
-	edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
-	edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
-	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
-	edit->excised_meta[0] = assoc_array_node_to_ptr(node);
-	pr_devel("<--%s() = ok [insert node before]\n", __func__);
-	return true;
-
 all_leaves_cluster_together:
 	/* All the leaves, new and old, want to cluster together in this node
 	 * in the same slot, so we have to replace this node with a shortcut to
diff --git a/net/core/dev.c b/net/core/dev.c
index 7aab8f6..18de74e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4174,6 +4174,9 @@
 int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
 EXPORT_SYMBOL(athrs_fast_nat_recv);
 
+int (*embms_tm_multicast_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL(embms_tm_multicast_recv);
+
 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
 	struct packet_type *ptype, *pt_prev;
@@ -4183,6 +4186,7 @@
 	int ret = NET_RX_DROP;
 	__be16 type;
 	int (*fast_recv)(struct sk_buff *skb);
+	int (*embms_recv)(struct sk_buff *skb);
 
 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
@@ -4250,6 +4254,10 @@
 		}
 	}
 
+	embms_recv = rcu_dereference(embms_tm_multicast_recv);
+	if (embms_recv)
+		embms_recv(skb);
+
 #ifdef CONFIG_NET_CLS_ACT
 	skb->tc_verd = 0;
 ncls:
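The net/core/dev.c hunk registers embms_tm_multicast_recv as a second RCU-published hook alongside athrs_fast_nat_recv: the eMBMS module installs its handler at init, and the receive path samples the pointer once per packet before calling it. A minimal sketch of the same publish/sample pattern in userspace C11, with atomics standing in for RCU; register_hook()/rx_path() are illustrative names.

#include <stdatomic.h>
#include <stdio.h>

/* Userspace model of the embms_tm_multicast_recv hook: a published function
 * pointer sampled once per packet.  C11 atomics stand in for RCU here.
 */
struct sk_buff;					/* opaque in this sketch */
typedef int (*rx_hook_t)(struct sk_buff *skb);

static _Atomic(rx_hook_t) rx_hook;		/* embms_tm_multicast_recv analogue */

static void register_hook(rx_hook_t fn)	/* RCU_INIT_POINTER() analogue */
{
	atomic_store_explicit(&rx_hook, fn, memory_order_release);
}

static void rx_path(struct sk_buff *skb)	/* __netif_receive_skb_core analogue */
{
	rx_hook_t fn = atomic_load_explicit(&rx_hook, memory_order_acquire);

	if (fn)
		fn(skb);			/* hook inspects/forwards the packet */
	/* ... continue with the normal protocol demux ... */
}

static int demo_hook(struct sk_buff *skb)
{
	(void)skb;
	puts("hook saw a packet");
	return 0;
}

int main(void)
{
	rx_path(NULL);				/* no hook installed: nothing runs */
	register_hook(demo_hook);
	rx_path(NULL);				/* hook now runs */
	return 0;
}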
diff --git a/net/embms_kernel/Makefile b/net/embms_kernel/Makefile
new file mode 100644
index 0000000..c21480e
--- /dev/null
+++ b/net/embms_kernel/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for Embms Kernel module.
+#
+
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+obj-m += embms_kernel.o
+
+ccflags-y += -D__CHECK_ENDIAN__
+
+CDEFINES += -D__CHECK_ENDIAN__
+
+KBUILD_CPPFLAGS += $(CDEFINES)
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean
+
diff --git a/net/embms_kernel/embms_kernel.c b/net/embms_kernel/embms_kernel.c
new file mode 100644
index 0000000..7b79574
--- /dev/null
+++ b/net/embms_kernel/embms_kernel.c
@@ -0,0 +1,1050 @@
+/*************************************************************************
+ * -----------------------------------------------------------------------
+ * Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ * -----------------------------------------------------------------------
+
+ * DESCRIPTION
+ * Main file for eMBMS Tunneling Module in kernel.
+ *************************************************************************
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <net/ip.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+
+#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
+#include <net/arp.h>
+#include <net/neighbour.h>
+
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/in.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/miscdevice.h>
+#include "embms_kernel.h"
+
+struct embms_info_internal embms_conf;
+
+/* Global structures used for tunneling. These include
+ * iphdr and udphdr which are appended to skbs for
+ * tunneling, net_device and tunnleing related
+ * structs and params
+ */
+
+unsigned char hdr_buff[sizeof(struct iphdr) + sizeof(struct udphdr)];
+struct iphdr *iph_global;
+struct udphdr *udph_global;
+struct net_device *dev_global;
+
+static struct tmgi_to_clnt_info tmgi_to_clnt_map_tbl;
+
+/* handle_multicast_stream - packet forwarding
+ * function for multicast stream
+ * Main use case is for EMBMS Over Softap feature
+ */
+
+static int handle_multicast_stream(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct udphdr *udph;
+	struct in_device *in_dev;
+	unsigned char *tmp_ptr = NULL;
+	struct sk_buff *skb_new = NULL;
+	struct sk_buff *skb_cpy = NULL;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+	int hdr_size = sizeof(*udph) + sizeof(*iph) + ETH_HLEN;
+
+	/* only IP packets */
+	if (htons(ETH_P_IP) != skb->protocol) {
+		embms_error("Not an IP packet\n");
+		return 0;
+	}
+
+	if (embms_conf.embms_tunneling_status == TUNNELING_OFF) {
+		embms_debug("Tunneling Disabled. Can't process packets\n");
+		return 0;
+	}
+
+	if (unlikely(memcmp(skb->dev->name, embms_conf.embms_iface,
+			    strlen(embms_conf.embms_iface)) != 0)) {
+		embms_error("Packet received on %s iface. NOT an EMBMS Iface\n",
+			    skb->dev->name);
+		return 0;
+	}
+
+	/* Check if dst ip of packet is same as multicast ip of any tmgi*/
+
+	iph = (struct iphdr *)skb->data;
+	udph = (struct udphdr *)(skb->data + sizeof(struct iphdr));
+
+	spin_lock_bh(&embms_conf.lock);
+
+	list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+			   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		temp_tmgi = list_entry(tmgi_entry_ptr,
+				       struct tmgi_to_clnt_info,
+				       tmgi_list_ptr);
+
+		if ((temp_tmgi->tmgi_multicast_addr == iph->daddr) &&
+		    (temp_tmgi->tmgi_port == udph->dest))
+			break;
+	}
+
+	if (tmgi_entry_ptr == &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		embms_error("handle_multicast_stream:");
+		embms_error("could not find matchin tmgi entry\n");
+		spin_unlock_bh(&embms_conf.lock);
+		return 0;
+	}
+
+	/* Found a matching tmgi entry. Realloc headroom to
+	 * accommodate new Ethernet, IP and UDP header
+	 */
+
+	skb_new = skb_realloc_headroom(skb, hdr_size);
+	if (unlikely(!skb_new)) {
+		embms_error("Can't allocate headroom\n");
+		spin_unlock_bh(&embms_conf.lock);
+		return 0;
+	}
+
+	/* push skb->data and copy IP and UDP headers*/
+
+	tmp_ptr = skb_push(skb_new,
+			   sizeof(struct udphdr) + sizeof(struct iphdr));
+
+	iph = (struct iphdr *)tmp_ptr;
+	udph = (struct udphdr *)(tmp_ptr + sizeof(struct iphdr));
+
+	memcpy(tmp_ptr, hdr_buff, hdr_size - ETH_HLEN);
+	udph->len = htons(skb_new->len - sizeof(struct iphdr));
+	iph->tot_len = htons(skb_new->len);
+
+	list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+			   &temp_tmgi->client_list_head) {
+		temp_client = list_entry(clnt_ptr,
+					 struct clnt_info,
+					 client_list_ptr);
+
+		/* Make a copy of skb_new with new IP and UDP header.
+		 * We can't use skb_new or its clone here since we need to
+		 * constantly change dst ip and dst port which is not possible
+		 * for shared memory as is the case with skb_new.
+		 */
+
+		skb_cpy = skb_copy(skb_new, GFP_ATOMIC);
+		if (unlikely(!skb_cpy)) {
+			embms_error("Can't copy skb\n");
+			kfree_skb(skb_new);
+			spin_unlock_bh(&embms_conf.lock);
+			return 0;
+		}
+
+		iph = (struct iphdr *)skb_cpy->data;
+		udph = (struct udphdr *)(skb_cpy->data + sizeof(struct iphdr));
+
+		iph->id = htons(atomic_inc_return(&embms_conf.ip_ident));
+
+		/* Calculate checksum for new IP and UDP header*/
+
+		udph->dest = temp_client->port;
+		skb_cpy->csum = csum_partial((char *)udph,
+					     ntohs(udph->len),
+					     skb_cpy->csum);
+
+		iph->daddr = temp_client->addr;
+		ip_send_check(iph);
+
+		udph->check = 0;
+		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+						ntohs(udph->len),
+						IPPROTO_UDP,
+						skb_cpy->csum);
+
+		if (udph->check == 0)
+			udph->check = CSUM_MANGLED_0;
+
+		if (unlikely(!dev_global)) {
+			embms_error("Global device NULL\n");
+			kfree_skb(skb_cpy);
+			kfree_skb(skb_new);
+			spin_unlock_bh(&embms_conf.lock);
+			return 0;
+		}
+
+		/* update device info and add MAC header*/
+
+		skb_cpy->dev = dev_global;
+
+		skb_cpy->dev->header_ops->create(skb_cpy, skb_cpy->dev,
+						ETH_P_IP, temp_client->dmac,
+						NULL, skb_cpy->len);
+		dev_queue_xmit(skb_cpy);
+	}
+
+	spin_unlock_bh(&embms_conf.lock);
+	kfree_skb(skb_new);
+	return 1;
+}
+
+static int check_embms_device(atomic_t *use_count)
+{
+	int ret;
+
+	if (atomic_inc_return(use_count) == 1) {
+		ret = 0;
+	} else {
+		atomic_dec(use_count);
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static int embms_device_open(struct inode *inode, struct file *file)
+{
+	/*Check if the device is busy*/
+	if (check_embms_device(&embms_conf.device_under_use)) {
+		embms_error("embms_tm_open : EMBMS device busy\n");
+		return -EBUSY;
+	}
+
+	try_module_get(THIS_MODULE);
+	return SUCCESS;
+}
+
+static int embms_device_release(struct inode *inode, struct file *file)
+{
+	/* Reduce device use count before leaving*/
+	embms_debug("Releasing EMBMS device..\n");
+	atomic_dec(&embms_conf.device_under_use);
+	embms_conf.embms_tunneling_status = TUNNELING_OFF;
+	module_put(THIS_MODULE);
+	return SUCCESS;
+}
+
+static struct tmgi_to_clnt_info *check_for_tmgi_entry(u32 addr,
+						      u16 port)
+{
+	struct list_head *tmgi_ptr, *prev_tmgi_ptr;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+
+	embms_debug("check_for_tmgi_entry: mcast addr :%pI4, port %u\n",
+		    &addr, ntohs(port));
+
+	list_for_each_safe(tmgi_ptr,
+			   prev_tmgi_ptr,
+			   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		temp_tmgi = list_entry(tmgi_ptr,
+				       struct tmgi_to_clnt_info,
+				       tmgi_list_ptr);
+
+		if ((temp_tmgi->tmgi_multicast_addr == addr) &&
+		    (temp_tmgi->tmgi_port == port)) {
+			embms_debug("check_for_tmgi_entry:TMGI entry found\n");
+			return temp_tmgi;
+		}
+	}
+	return NULL;
+}
+
+static struct clnt_info *chk_clnt_entry(struct tmgi_to_clnt_info *tmgi,
+					struct tmgi_to_clnt_info_update *clnt)
+{
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+	struct clnt_info *temp_client = NULL;
+
+	embms_debug("check_for_client_entry: clnt addr :%pI4, port %u\n",
+		    &clnt->client_addr, ntohs(clnt->client_port));
+
+	list_for_each_safe(clnt_ptr,
+			   prev_clnt_ptr,
+			   &tmgi->client_list_head) {
+		temp_client = list_entry(clnt_ptr,
+					 struct clnt_info,
+					 client_list_ptr);
+		if ((temp_client->addr == clnt->client_addr) &&
+		    (temp_client->port == clnt->client_port)) {
+			embms_debug("Clnt entry present\n");
+			return temp_client;
+		}
+	}
+	return NULL;
+}
+
+static int add_new_tmgi_entry(struct tmgi_to_clnt_info_update *info_update,
+			      struct clnt_info *clnt)
+{
+	struct tmgi_to_clnt_info *new_tmgi = NULL;
+
+	embms_debug("add_new_tmgi_entry:Enter\n");
+
+	new_tmgi = kzalloc(sizeof(*new_tmgi),
+			   GFP_ATOMIC);
+	if (!new_tmgi) {
+		embms_error("add_new_tmgi_entry: mem alloc failed\n");
+		return -ENOMEM;
+	}
+
+	memset(new_tmgi, 0, sizeof(struct tmgi_to_clnt_info));
+
+	new_tmgi->tmgi_multicast_addr = info_update->multicast_addr;
+	new_tmgi->tmgi_port = info_update->multicast_port;
+
+	embms_debug("add_new_tmgi_entry:");
+	embms_debug("New tmgi multicast addr :%pI4 , port %u\n",
+		    &info_update->multicast_addr,
+		    ntohs(info_update->multicast_port));
+
+	embms_debug("add_new_tmgi_entry:Adding client entry\n");
+
+	spin_lock_bh(&embms_conf.lock);
+
+	INIT_LIST_HEAD(&new_tmgi->client_list_head);
+	list_add(&clnt->client_list_ptr,
+		 &new_tmgi->client_list_head);
+	new_tmgi->no_of_clients++;
+
+	/* Once above steps are done successfully,
+	 * we add tmgi entry to our local table
+	 */
+
+	list_add(&new_tmgi->tmgi_list_ptr,
+		 &tmgi_to_clnt_map_tbl.tmgi_list_ptr);
+	embms_conf.no_of_tmgi_sessions++;
+
+	spin_unlock_bh(&embms_conf.lock);
+
+	return SUCCESS;
+}
+
+static void print_tmgi_to_client_table(void)
+{
+	int i, j;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+	embms_debug("====================================================\n");
+	embms_debug("Printing TMGI to Client Table :\n");
+	embms_debug("No of Active TMGIs : %d\n",
+		    embms_conf.no_of_tmgi_sessions);
+	embms_debug("====================================================\n\n");
+
+	if (embms_conf.no_of_tmgi_sessions > 0) {
+		i = 1;
+		list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+				   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+			temp_tmgi = list_entry(tmgi_entry_ptr,
+					       struct tmgi_to_clnt_info,
+					       tmgi_list_ptr);
+
+			embms_debug("TMGI entry %d :\n", i);
+			embms_debug("TMGI multicast addr : %pI4 , port %u\n\n",
+				    &temp_tmgi->tmgi_multicast_addr,
+				    ntohs(temp_tmgi->tmgi_port));
+			embms_debug("No of clients : %d\n",
+				    temp_tmgi->no_of_clients);
+			j = 1;
+
+			list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+					   &temp_tmgi->client_list_head) {
+				temp_client = list_entry(clnt_ptr,
+							 struct clnt_info,
+							 client_list_ptr);
+				embms_debug("Client entry %d :\n", j);
+				embms_debug("client addr : %pI4 , port %u\n\n",
+					    &temp_client->addr,
+					    ntohs(temp_client->port));
+				j++;
+			}
+			i++;
+			embms_debug("===========================================\n\n");
+		}
+	} else {
+		embms_debug("No TMGI entries to Display\n");
+	}
+	embms_debug("==================================================================\n\n");
+}
+
+/**
+ * delete_tmgi_entry_from_table() - deletes tmgi from global tmgi-client table
+ * @buffer:	Buffer containing TMGI info for deletion.
+ *
+ * This function completely removes the TMGI from
+ * global TMGI-client table, along with the client list
+ * so that no packets for this TMGI are processed
+ *
+ * Return: Success on deleting TMGI entry, error otherwise.
+ */
+
+int delete_tmgi_entry_from_table(char *buffer)
+{
+	int i;
+	struct tmgi_to_clnt_info_update *info_update;
+	char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+	embms_debug("delete_tmgi_entry_from_table: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("delete_tmgi_entry_from_table:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	/* This function is used to delete a specific TMGI entry
+	 * when that particular TMGI goes down
+	 * Search for the TMGI entry in our local table
+	 */
+	if (embms_conf.no_of_tmgi_sessions == 0) {
+		embms_error("TMGI count 0. Nothing to delete\n");
+		return SUCCESS;
+	}
+
+	temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+					 info_update->multicast_port);
+
+	if (!temp_tmgi) {
+		/* TMGI entry was not found in our local table*/
+		embms_error("delete_client_entry_from_table :");
+		embms_error("Desired TMGI entry not found\n");
+		return -EBADPARAM;
+	}
+
+	spin_lock_bh(&embms_conf.lock);
+
+	/* We need to free memory allocated to client entries
+	 * for a particular TMGI entry
+	 */
+
+	list_for_each_safe(clnt_ptr, prev_clnt_ptr,
+			   &temp_tmgi->client_list_head) {
+		temp_client = list_entry(clnt_ptr,
+					 struct clnt_info,
+					 client_list_ptr);
+		embms_debug("delete_tmgi_entry_from_table :");
+		embms_debug("Client addr to delete :%pI4 , port %u\n",
+			    &temp_client->addr, ntohs(temp_client->port));
+		list_del(&temp_client->client_list_ptr);
+		temp_tmgi->no_of_clients--;
+		kfree(temp_client);
+	}
+
+	/* Free memory allocated to tmgi entry*/
+
+	list_del(&temp_tmgi->tmgi_list_ptr);
+	kfree(temp_tmgi);
+	embms_conf.no_of_tmgi_sessions--;
+
+	spin_unlock_bh(&embms_conf.lock);
+
+	embms_debug("delete_tmgi_entry_from_table : TMGI Entry deleted.\n");
+
+	return SUCCESS;
+}
+
+/**
+ * delete_client_entry_from_all_tmgi() - deletes client from all tmgi lists
+ * @buffer:	Buffer containing client info for deletion.
+ *
+ * This function completely removes a client from
+ * all TMGIs in the global TMGI-client table. It also deletes
+ * TMGI entries that are left with no clients.
+ *
+ * Return: Success on deleting client entry, error otherwise.
+ */
+int delete_client_entry_from_all_tmgi(char *buffer)
+{
+	int i;
+	struct tmgi_to_clnt_info_update *info_update;
+	char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+	/* We use this function when we want to delete any
+	 * client entry from all TMGI entries. This scenario
+	 * happens when any client disconnects and hence
+	 * we need to clean all related client entries
+	 * in our mapping table
+	 */
+
+	embms_debug("del_clnt_from_all_tmgi: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("del_clnt_from_all_tmgi:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	/* We start checking from first TMGI entry and if client
+	 * entry is found in client entries of any TMGI, we clean
+	 * up that client entry from that TMGI entry
+	 */
+	if (embms_conf.no_of_tmgi_sessions == 0)
+		return SUCCESS;
+
+	list_for_each_safe(tmgi_entry_ptr, prev_tmgi_entry_ptr,
+			   &tmgi_to_clnt_map_tbl.tmgi_list_ptr) {
+		tmgi = list_entry(tmgi_entry_ptr,
+				  struct tmgi_to_clnt_info,
+				  tmgi_list_ptr);
+
+		temp_client = chk_clnt_entry(tmgi, info_update);
+		if (!temp_client)
+			continue;
+
+		spin_lock_bh(&embms_conf.lock);
+
+		list_del(&temp_client->client_list_ptr);
+		tmgi->no_of_clients--;
+		kfree(temp_client);
+
+		spin_unlock_bh(&embms_conf.lock);
+
+		temp_client = NULL;
+
+		if (tmgi->no_of_clients == 0) {
+			/* Deleted clnt was the only clnt for
+			 * that TMGI we need to delete TMGI
+			 * entry from table
+			 */
+			embms_debug("del_clnt_from_all_tmgi:");
+			embms_debug("Deleted client was ");
+			embms_debug("last client for tmgi\n");
+			embms_debug("del_clnt_from_all_tmgi:");
+			embms_debug("Delting tmgi as it has ");
+			embms_debug("zero clients.TMGI IP ");
+			embms_debug(":%pI4 , port %u\n",
+				    &tmgi->tmgi_multicast_addr,
+				    ntohs(tmgi->tmgi_port));
+
+			spin_lock_bh(&embms_conf.lock);
+
+			list_del(&tmgi->tmgi_list_ptr);
+			embms_conf.no_of_tmgi_sessions--;
+			kfree(tmgi);
+
+			spin_unlock_bh(&embms_conf.lock);
+
+			embms_debug("del_clnt_from_all_tmgi:");
+			embms_debug("TMGI entry deleted\n");
+		}
+	}
+
+	embms_debug("del_clnt_from_all_tmgi Successful\n");
+	return SUCCESS;
+}
+
+/**
+ * add_client_entry_to_table() - add client entry to specified TMGI
+ * @buffer:	Buffer containing client info for addition.
+ *
+ * This function adds a client to the specified TMGI in
+ * the global TMGI-client table. If TMGI entry is not
+ * present, it adds a new TMGI entry and adds client
+ * entry to it.
+ *
+ * Return: Success on adding client entry, error otherwise.
+ */
+int add_client_entry_to_table(char *buffer)
+{
+	int i, ret;
+	struct tmgi_to_clnt_info_update *info_update;
+	char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
+	struct clnt_info *new_client = NULL;
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *new_tmgi = NULL;
+	struct tmgi_to_clnt_info *tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+	struct neighbour *neigh_entry;
+	struct in_device *iface_dev;
+	struct in_ifaddr *iface_info;
+
+	embms_debug("add_client_entry_to_table: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("add_client_entry_to_table:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	new_client = kzalloc(sizeof(*new_client), GFP_ATOMIC);
+	if (!new_client) {
+		embms_error("add_client_entry_to_table:");
+		embms_error("Cannot allocate memory\n");
+		return -ENOMEM;
+	}
+
+	new_client->addr = info_update->client_addr;
+	new_client->port = info_update->client_port;
+
+	neigh_entry = __ipv4_neigh_lookup(dev_global,
+					  (u32)(new_client->addr));
+	if (!neigh_entry) {
+		embms_error("add_client_entry_to_table :");
+		embms_error("Can't find neighbour entry\n");
+		kfree(new_client);
+		return -EBADPARAM;
+	}
+
+	ether_addr_copy(new_client->dmac, neigh_entry->ha);
+
+	embms_debug("DMAC of client : %pM\n", new_client->dmac);
+
+	embms_debug("add_client_entry_to_table:");
+	embms_debug("New client addr :%pI4 , port %u\n",
+		    &info_update->client_addr,
+		    ntohs(info_update->client_port));
+
+	if (embms_conf.no_of_tmgi_sessions == 0) {
+		/* TMGI Client mapping table is empty.
+		 * First client entry is being added
+		 */
+
+		embms_debug("tmgi_to_clnt_map_tbl is empty\n");
+
+		ret = add_new_tmgi_entry(info_update, new_client);
+		if (ret != SUCCESS) {
+			kfree(new_client);
+			new_client = NULL;
+		}
+
+		goto exit_add;
+	}
+
+	/* In this case, table already has some entries
+	 * and we need to search for the specific tmgi entry
+	 * for which client entry is to be added
+	 */
+
+	tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+				    info_update->multicast_port);
+	if (tmgi) {
+		if (chk_clnt_entry(tmgi, info_update)) {
+			kfree(new_client);
+			return -ENOEFFECT;
+		}
+
+		/* Adding client to the client list
+		 * for the specified TMGI
+		 */
+
+		spin_lock_bh(&embms_conf.lock);
+
+		list_add(&new_client->client_list_ptr,
+			 &tmgi->client_list_head);
+		tmgi->no_of_clients++;
+
+		spin_unlock_bh(&embms_conf.lock);
+
+		ret = SUCCESS;
+	} else {
+		/* TMGI specified in the message was not found in
+	 * mapping table. Hence, we need to add a new entry
+		 * for this TMGI and add the specified client to the client
+		 * list
+		 */
+
+		embms_debug("TMGI entry not present. Adding tmgi entry\n");
+
+		ret = add_new_tmgi_entry(info_update, new_client);
+		if (ret != SUCCESS) {
+			kfree(new_client);
+			new_client = NULL;
+		}
+	}
+
+exit_add:
+	return ret;
+}
+
+/**
+ * delete_client_entry_from_table() - delete client entry from specified TMGI
+ * @buffer:	Buffer containing client info for deletion.
+ *
+ * This function deletes a client from the specified TMGI in
+ * the global TMGI-client table. If this was the last client
+ * entry, it also deletes the TMGI entry.
+ *
+ * Return: Success on deleting client entry, error otherwise.
+ */
+int delete_client_entry_from_table(char *buffer)
+{
+	int i;
+	struct tmgi_to_clnt_info_update *info_update;
+	char message_buffer[sizeof(struct tmgi_to_clnt_info_update)];
+	struct clnt_info *temp_client = NULL;
+	struct tmgi_to_clnt_info *temp_tmgi = NULL;
+	struct list_head *tmgi_entry_ptr, *prev_tmgi_entry_ptr;
+	struct list_head *clnt_ptr, *prev_clnt_ptr;
+
+	embms_debug("delete_client_entry_from_table: Enter\n");
+
+	info_update = (struct tmgi_to_clnt_info_update *)buffer;
+
+	if (!info_update) {
+		embms_error("delete_client_entry_from_table:");
+		embms_error("NULL arguments passed\n");
+		return -EBADPARAM;
+	}
+
+	/* Search for the TMGI entry*/
+	if (embms_conf.no_of_tmgi_sessions == 0)
+		return SUCCESS;
+
+	temp_tmgi = check_for_tmgi_entry(info_update->multicast_addr,
+					 info_update->multicast_port);
+
+	if (!temp_tmgi) {
+		embms_error("delete_client_entry_from_table:TMGI not found\n");
+		return -EBADPARAM;
+	}
+	/* Delete client entry for a specific tmgi*/
+
+	embms_debug("delete_client_entry_from_table:clnt addr :%pI4,port %u\n",
+		    &info_update->client_addr,
+		    ntohs(info_update->client_port));
+
+	temp_client = chk_clnt_entry(temp_tmgi, info_update);
+
+	if (!temp_client) {
+		/* Specified client entry was not found in client list
+		 * of specified TMGI
+		 */
+		embms_error("delete_client_entry_from_table:Clnt not found\n");
+		return -EBADPARAM;
+	}
+
+	spin_lock_bh(&embms_conf.lock);
+
+	list_del(&temp_client->client_list_ptr);
+	temp_tmgi->no_of_clients--;
+
+	spin_unlock_bh(&embms_conf.lock);
+
+	kfree(temp_client);
+	temp_client = NULL;
+
+	embms_debug("delete_client_entry_from_table:Client entry deleted\n");
+
+	if (temp_tmgi->no_of_clients == 0) {
+		/* If deleted client was the only client for that TMGI
+		 * we need to delete TMGI entry from table
+		 */
+		embms_debug("delete_client_entry_from_table:");
+		embms_debug("Deleted client was the last client for tmgi\n");
+		embms_debug("delete_client_entry_from_table:");
+		embms_debug("Deleting tmgi since it has zero clients\n");
+
+		spin_lock_bh(&embms_conf.lock);
+
+		list_del(&temp_tmgi->tmgi_list_ptr);
+		embms_conf.no_of_tmgi_sessions--;
+		kfree(temp_tmgi);
+
+		spin_unlock_bh(&embms_conf.lock);
+
+		embms_debug("delete_client_entry_from_table: TMGI deleted\n");
+	}
+
+	if (embms_conf.no_of_tmgi_sessions == 0)
+		embms_conf.embms_tunneling_status = TUNNELING_OFF;
+
+	return SUCCESS;
+}
+
+/**
+ * embms_device_ioctl() - handle IOCTL calls to device
+ * @file:	File descriptor of file opened from userspace process
+ * @ioctl_num:	IOCTL to use
+ * @ioctl_param:	IOCTL parameters/arguments
+ *
+ * This function is called whenever a process tries to do
+ * an ioctl on our device file. As per the IOCTL number,
+ * it calls various functions to manipulate global
+ * TMGI-client table
+ *
+ * Return: Success if function call returns SUCCESS, error otherwise.
+ */
+
+int embms_device_ioctl(struct file *file, unsigned int ioctl_num,
+		       unsigned long ioctl_param)
+{
+	int i, ret, error;
+	char *temp;
+	char buffer[BUF_LEN];
+	struct in_device *iface_dev;
+	struct in_ifaddr *iface_info;
+	struct tmgi_to_clnt_info_update *info_update;
+	char __user *argp = (char __user *)ioctl_param;
+
+	memset(buffer, 0, BUF_LEN);
+
+	/* Switch according to the ioctl called*/
+	switch (ioctl_num) {
+	case ADD_EMBMS_TUNNEL:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = add_client_entry_to_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case DEL_EMBMS_TUNNEL:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_client_entry_from_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case TMGI_DEACTIVATE:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_tmgi_entry_from_table(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case CLIENT_DEACTIVATE:
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		ret = delete_client_entry_from_all_tmgi(buffer);
+		print_tmgi_to_client_table();
+		break;
+
+	case GET_EMBMS_TUNNELING_STATUS:
+		/* This ioctl is both input (ioctl_param) and
+		 * output (the return value of this function)
+		 */
+		embms_debug("Sending tunneling status : %d\n",
+			    embms_conf.embms_tunneling_status);
+		ret = embms_conf.embms_tunneling_status;
+		break;
+
+	case START_EMBMS_TUNNEL:
+
+		if (copy_from_user(buffer, argp,
+				   sizeof(struct tmgi_to_clnt_info_update)))
+			return -EFAULT;
+
+		info_update = (struct tmgi_to_clnt_info_update *)buffer;
+		embms_conf.embms_data_port = info_update->data_port;
+		udph_global->source = embms_conf.embms_data_port;
+
+		memset(embms_conf.embms_iface, 0, EMBMS_MAX_IFACE_NAME);
+		memcpy(embms_conf.embms_iface, info_update->iface_name,
+		       EMBMS_MAX_IFACE_NAME);
+
+		embms_conf.embms_tunneling_status = TUNNELING_ON;
+		embms_debug("Starting Tunneling. Embms_data_port  = %d\n",
+			    ntohs(embms_conf.embms_data_port));
+		embms_debug("Embms Data Iface = %s\n", embms_conf.embms_iface);
+		ret = SUCCESS;
+
+		/*Initialise dev_global to bridge device*/
+		dev_global = __dev_get_by_name(&init_net, BRIDGE_IFACE);
+		if (!dev_global) {
+			embms_error("Error in getting device info\n");
+			ret = FAILURE;
+		} else {
+			iface_dev = (struct in_device *)dev_global->ip_ptr;
+			iface_info = iface_dev->ifa_list;
+			while (iface_info) {
+				if (memcmp(iface_info->ifa_label,
+					   BRIDGE_IFACE,
+					   strlen(BRIDGE_IFACE)) == 0)
+					break;
+
+				iface_info = iface_info->ifa_next;
+			}
+			if (iface_info) {
+				embms_debug("IP address of %s iface is %pI4\n",
+					    BRIDGE_IFACE,
+					    &iface_info->ifa_address);
+				/*Populate source addr for header*/
+				iph_global->saddr = iface_info->ifa_address;
+				ret = SUCCESS;
+			} else {
+				embms_debug("Could not find iface address\n");
+				ret = FAILURE;
+			}
+		}
+
+		break;
+
+	case STOP_EMBMS_TUNNEL:
+
+		embms_conf.embms_tunneling_status = TUNNELING_OFF;
+		embms_debug("Stopped Tunneling..\n");
+		ret = SUCCESS;
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+
+	return ret;
+}
+
+/* Module Declarations
+ * This structure will hold the functions to be called
+ * when a process does something to the device we
+ * created. Since a pointer to this structure is kept in
+ * the devices table, it can't be local to
+ * init_module. NULL is for unimplemented functions.
+ */
+static const struct file_operations embms_device_fops = {
+	.owner = THIS_MODULE,
+	.open = embms_device_open,
+	.release = embms_device_release,
+	.read = NULL,
+	.write = NULL,
+	.unlocked_ioctl = embms_device_ioctl,
+};
+
+static int embms_ioctl_init(void)
+{
+	int ret;
+	struct device *dev;
+
+	ret = alloc_chrdev_region(&device, 0, dev_num, EMBMS_DEVICE_NAME);
+	if (ret) {
+		embms_error("device_alloc err\n");
+		goto dev_alloc_err;
+	}
+
+	embms_class = class_create(THIS_MODULE, EMBMS_DEVICE_NAME);
+	if (IS_ERR(embms_class)) {
+		embms_error("class_create err\n");
+		goto class_err;
+	}
+
+	dev = device_create(embms_class, NULL, device,
+			    &embms_conf, EMBMS_DEVICE_NAME);
+	if (IS_ERR(dev)) {
+		embms_error("device_create err\n");
+		goto device_err;
+	}
+
+	cdev_init(&embms_device, &embms_device_fops);
+	ret = cdev_add(&embms_device, device, dev_num);
+	if (ret) {
+		embms_error("cdev_add err\n");
+		goto cdev_add_err;
+	}
+
+	embms_debug("ioctl init OK!!\n");
+	return 0;
+
+cdev_add_err:
+	device_destroy(embms_class, device);
+device_err:
+	class_destroy(embms_class);
+class_err:
+	unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+	return -ENODEV;
+}
+
+static void embms_ioctl_deinit(void)
+{
+	cdev_del(&embms_device);
+	device_destroy(embms_class, device);
+	class_destroy(embms_class);
+	unregister_chrdev_region(device, dev_num);
+}
+
+/* Initialize the module - register the embms char device */
+static int __init start_embms(void)
+{
+	int ret = 0;
+
+	iph_global = (struct iphdr *)hdr_buff;
+	udph_global = (struct udphdr *)(hdr_buff + sizeof(struct iphdr));
+
+	embms_conf.embms_tunneling_status = TUNNELING_OFF;
+	embms_conf.no_of_tmgi_sessions = 0;
+	embms_conf.embms_data_port = 0;
+	atomic_set(&embms_conf.device_under_use, 0);
+	atomic_set(&embms_conf.ip_ident, 0);
+	spin_lock_init(&embms_conf.lock);
+
+	embms_debug("Registering embms device\n");
+
+	ret = embms_ioctl_init();
+	if (ret) {
+		embms_error("embms device failed to register");
+		goto fail_init;
+	}
+
+	INIT_LIST_HEAD(&tmgi_to_clnt_map_tbl.tmgi_list_ptr);
+
+	memset(hdr_buff, 0, sizeof(struct udphdr) + sizeof(struct iphdr));
+	udph_global->check = UDP_CHECKSUM;
+	iph_global->version = IP_VERSION;
+	iph_global->ihl = IP_IHL;
+	iph_global->tos = IP_TOS;
+	iph_global->frag_off = IP_FRAG_OFFSET;
+	iph_global->ttl = IP_TTL;
+	iph_global->protocol = IPPROTO_UDP;
+
+	dev_global = NULL;
+
+	if (!embms_tm_multicast_recv)
+		RCU_INIT_POINTER(embms_tm_multicast_recv,
+				 handle_multicast_stream);
+
+	return ret;
+
+fail_init:
+	/* embms_ioctl_init() already unwound its own state on failure */
+	return ret;
+}
+
+/* Cleanup - unregister the embms char device */
+
+static void __exit stop_embms(void)
+{
+	embms_ioctl_deinit();
+
+	if (rcu_dereference(embms_tm_multicast_recv))
+		RCU_INIT_POINTER(embms_tm_multicast_recv, NULL);
+
+	embms_debug("unregister_chrdev done\n");
+}
+
+module_init(start_embms);
+module_exit(stop_embms);
+MODULE_LICENSE("GPL v2");
diff --git a/net/embms_kernel/embms_kernel.h b/net/embms_kernel/embms_kernel.h
new file mode 100644
index 0000000..c8248ce
--- /dev/null
+++ b/net/embms_kernel/embms_kernel.h
@@ -0,0 +1,233 @@
+/******************************************************************
+ * Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *---------------------------------------------------------------
+
+ * DESCRIPTION
+ * Header file for eMBMS Tunneling Module in kernel.
+ *******************************************************************
+ */
+
+#ifndef EMBMS_H
+#define EMBMS_H
+
+#include <linux/ioctl.h>
+#include <stdbool.h>
+#include <linux/if_addr.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/cdev.h>
+
+#define EMBMS_MAX_IFACE_NAME    20
+
+/* Defining IP and UDP header related macros*/
+
+#define UDP_CHECKSUM                0
+#define IP_VERSION                  4
+#define IP_IHL                      5
+#define IP_TOS                      0
+#define IP_ID                       1
+#define IP_FRAG_OFFSET              htons(0x4000)
+#define IP_TTL                      64
+#define BRIDGE_IFACE                "bridge0"
+
+#define BUF_LEN 1024
+#define TUNNELING_ON 1
+#define TUNNELING_OFF 0
+
+// definitions required for IOCTL
+static unsigned int dev_num = 1;
+/* Embms device used for communication*/
+struct cdev embms_device;
+static struct class *embms_class;
+static dev_t device;
+#define EMBMS_IOC_MAGIC 0x64
+
+#define embms_debug pr_debug
+#define embms_error pr_debug
+
+/* The name of the device file*/
+#define EMBMS_DEVICE_NAME "embms_tm_device"
+
+extern int (*embms_tm_multicast_recv)(struct sk_buff *skb);
+
+/**
+ * enum embms_action_type - Describes action to perform
+ * @ADD_CLIENT_ENTRY: add client entry to TMGI
+ * @DELETE_CLIENT_ENTRY: delete client entry from TMGI
+ * @TMGI_DEACTIVATE: Delete TMGI entry
+ * @CLIENT_ACTIVATE_ALL_TMGI: Add client to all TMGI
+ * @CLIENT_DEACTIVATE_ALL_TMGI: Delete client from all TMGI
+ * @SESSION_DEACTIVATE: Stop session
+ * @SOCK_INFO: Socket information like V4 addr, port etc
+ *
+ * This enum defines the types of action which are
+ * supported by this module.
+ */
+
+enum {
+	ADD_CLIENT_ENTRY = 0,
+	DELETE_CLIENT_ENTRY,
+	TMGI_DEACTIVATE,
+	CLIENT_ACTIVATE_ALL_TMGI,
+	CLIENT_DEACTIVATE_ALL_TMGI,
+	SESSION_DEACTIVATE,
+	SOCK_INFO
+} embms_action_type;
+
+/**
+ * struct tmgi_to_clnt_info_update - information for addition/deletion
+ * @multicast_addr: TMGI multicast IP to receive data
+ * @multicast_port: TMGI multicast port to receive data
+ * @client_addr: Client IPV4 address for sending data
+ * @client_port: Client port for sending data
+ * @data_port: port used to send data to client
+ * @action_type: Action to be performed
+ * @iface_name: iface to listen to for data
+ *
+ * This structure contains information as to what action
+ * needs to be performed on TMGI-client table. It is
+ * sent as a parameter during an IOCTL call
+ */
+
+struct tmgi_to_clnt_info_update {
+	u32 multicast_addr;
+	u16 multicast_port;
+	u32 client_addr;
+	u16 client_port;
+	u16 data_port;
+	u32 action_type;
+	char iface_name[EMBMS_MAX_IFACE_NAME];
+};
+
+/**
+ * struct clnt_info - contains client information
+ * @addr: Client IPV4 address for sending packets
+ * @port: Client port for sending packets
+ * @dmac: Client DMAC address
+ * @client_list_ptr : list ptr used to maintain client list
+ *
+ * This structure maintains complete client information
+ * to be used when sending packets to client
+ */
+
+struct clnt_info {
+	u32 addr;
+	u16 port;
+	u8 dmac[ETH_ALEN];
+	struct list_head client_list_ptr;
+};
+
+/**
+ * struct tmgi_to_clnt_info - contains TMGI information
+ * @tmgi_multicast_addr: TMGI IPV4 address to listen for packets
+ * @tmgi_port: Client port to listen for packets
+ * @no_of_clients: No of clients for a TMGI
+ * @client_list_head : list head for client list
+ * @tmgi_list_ptr : list ptr to maintain tmgi list
+ *
+ * This structure maintains complete TMGI information
+ * along with the list of clients subscribed to it
+ */
+
+struct tmgi_to_clnt_info {
+	u32 tmgi_multicast_addr;
+	u16 tmgi_port;
+	u16 no_of_clients;
+	struct list_head client_list_head;
+	struct list_head tmgi_list_ptr;
+};
+
+/**
+ * struct embms_info_internal - stores module specific params
+ * @device_under_use: Used to prevent concurrent access to the same device
+ * @embms_data_port: Source Data port used for tunnelled packets
+ * @embms_iface: Iface to receive embms traffic
+ * @embms_tunneling_status : Current EMBMS Status
+ * @no_of_tmgi_sessions : Number of current active TMGI sessions
+ * @lock : Lock for concurrency scenarios
+ * @ip_ident : IP identification number to be used for sent packets
+ *
+ * This structure holds module-specific information which is
+ * used throughout the module to maintain consistency
+ */
+
+struct embms_info_internal {
+	atomic_t device_under_use;
+	int embms_data_port;
+	char embms_iface[EMBMS_MAX_IFACE_NAME];
+	int embms_tunneling_status;
+	int no_of_tmgi_sessions;
+	/*lock to prevent concurrent access*/
+	spinlock_t lock;
+	atomic_t ip_ident;
+};
+
+/* This ioctl is used to add a new client entry to tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define ADD_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 0, \
+		struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete a client entry for a particular
+ * TMGI from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define DEL_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 1, \
+		struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete a TMGI entry completely
+ * from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define TMGI_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 2, \
+		struct tmgi_to_clnt_info_update)
+
+/* This ioctl is used to delete client entry completely
+ * from tunneling module.
+ * Entry params are populated in the struct used for ioctl
+ */
+
+#define CLIENT_DEACTIVATE _IOW(EMBMS_IOC_MAGIC, 3, \
+		struct tmgi_to_clnt_info_update)
+
+/* Gets the ON/OFF status of Tunneling module*/
+
+#define GET_EMBMS_TUNNELING_STATUS _IO(EMBMS_IOC_MAGIC, 4)
+
+/* Used to start tunneling. Argument is the port
+ * number to be used to send
+ * data to clients
+ */
+
+#define START_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 5, \
+		struct tmgi_to_clnt_info_update)
+
+/* Used to stop tunneling */
+
+#define STOP_EMBMS_TUNNEL _IO(EMBMS_IOC_MAGIC, 6)
+
+/* Return values indicating error status*/
+#define SUCCESS               0         /* Successful operation*/
+#define FAILURE               -1         /* Unsuccessful operation*/
+
+/* Error Condition Values*/
+#define ENOMEM                -2        /* Out of memory*/
+#define EBADPARAM             -3        /* Incorrect parameters passed*/
+#define ENOEFFECT             -4        /* No Effect*/
+
+#endif
+
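The header above fixes the ioctl ABI: magic 0x64, struct tmgi_to_clnt_info_update as the argument for the tunnel add/delete/start calls, and a character device named "embms_tm_device". A hedged sketch of a userspace client follows, assuming the node appears as /dev/embms_tm_device, mirroring the struct layout locally with fixed-width types; every address, port, and iface name below is a made-up example value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <arpa/inet.h>

#define EMBMS_MAX_IFACE_NAME	20
#define EMBMS_IOC_MAGIC		0x64

/* Local mirror of struct tmgi_to_clnt_info_update from embms_kernel.h,
 * using fixed-width types; assumed to match the kernel layout.
 */
struct tmgi_to_clnt_info_update {
	uint32_t multicast_addr;
	uint16_t multicast_port;
	uint32_t client_addr;
	uint16_t client_port;
	uint16_t data_port;
	uint32_t action_type;
	char iface_name[EMBMS_MAX_IFACE_NAME];
};

#define ADD_EMBMS_TUNNEL   _IOW(EMBMS_IOC_MAGIC, 0, struct tmgi_to_clnt_info_update)
#define START_EMBMS_TUNNEL _IOW(EMBMS_IOC_MAGIC, 5, struct tmgi_to_clnt_info_update)

int main(void)
{
	struct tmgi_to_clnt_info_update req = { 0 };
	int fd = open("/dev/embms_tm_device", O_RDWR);	/* assumed node path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Start tunneling: name the iface that carries eMBMS data and the
	 * source port to stamp on forwarded packets (example values).
	 */
	req.data_port = htons(5004);
	strncpy(req.iface_name, "rmnet_data0", EMBMS_MAX_IFACE_NAME - 1);
	if (ioctl(fd, START_EMBMS_TUNNEL, &req) < 0)
		perror("START_EMBMS_TUNNEL");

	/* Map one TMGI multicast stream to one softap client (example values). */
	memset(&req, 0, sizeof(req));
	req.multicast_addr = inet_addr("224.0.23.60");
	req.multicast_port = htons(4000);
	req.client_addr = inet_addr("192.168.43.10");
	req.client_port = htons(4001);
	if (ioctl(fd, ADD_EMBMS_TUNNEL, &req) < 0)
		perror("ADD_EMBMS_TUNNEL");

	close(fd);
	return 0;
}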
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index 5ce4600..aa8a0b5 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -34,12 +34,14 @@
  *            parmeter depends on the rmnet_mode
  */
 struct rmnet_logical_ep_conf_s {
+	struct net_device *egress_dev;
+	struct timespec last_flush_time;
+	long curr_time_limit;
+	unsigned int flush_byte_count;
+	unsigned int curr_byte_threshold;
 	u8 refcount;
 	u8 rmnet_mode;
 	u8 mux_id;
-	struct timespec flush_time;
-	unsigned int flush_byte_count;
-	struct net_device *egress_dev;
 };
 
 /**
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 68a4376..1209197 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -41,26 +41,30 @@
 MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
 #endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
 
-/* Time in nano seconds. This number must be less that a second. */
-long gro_flush_time __read_mostly = 10000L;
-module_param(gro_flush_time, long, 0644);
-MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+static bool gro_flush_logic_on __read_mostly = 1;
+module_param(gro_flush_logic_on, bool, 0644);
+MODULE_PARM_DESC(gro_flush_logic_on, "If off, let GRO determine flushing");
 
-unsigned int gro_min_byte_thresh __read_mostly = 7500;
-module_param(gro_min_byte_thresh, uint, 0644);
-MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");
-
-unsigned int dynamic_gro_on __read_mostly = 1;
-module_param(dynamic_gro_on, uint, 0644);
+static bool dynamic_gro_on __read_mostly = 1;
+module_param(dynamic_gro_on, bool, 0644);
 MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");
 
+/* Time in nanoseconds. This number must be less than a second. */
+static long lower_flush_time __read_mostly = 10000L;
+module_param(lower_flush_time, long, 0644);
+MODULE_PARM_DESC(lower_flush_time, "Min time value for flushing GRO");
+
+static unsigned int lower_byte_limit __read_mostly = 7500;
+module_param(lower_byte_limit, uint, 0644);
+MODULE_PARM_DESC(lower_byte_limit, "Min byte count for flushing GRO");
+
 unsigned int upper_flush_time __read_mostly = 15000;
 module_param(upper_flush_time, uint, 0644);
-MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");
+MODULE_PARM_DESC(upper_flush_time, "Max time value for flushing GRO");
 
 unsigned int upper_byte_limit __read_mostly = 10500;
 module_param(upper_byte_limit, uint, 0644);
-MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
+MODULE_PARM_DESC(upper_byte_limit, "Max byte count for flushing GRO");
 
 #define RMNET_DATA_IP_VERSION_4 0x40
 #define RMNET_DATA_IP_VERSION_6 0x60
@@ -258,62 +262,64 @@
 {
 	struct timespec curr_time, diff;
 
-	if (!gro_flush_time)
+	if (!gro_flush_logic_on)
 		return;
 
-	if (unlikely(ep->flush_time.tv_sec == 0)) {
-		getnstimeofday(&ep->flush_time);
+	if (unlikely(ep->last_flush_time.tv_sec == 0)) {
+		getnstimeofday(&ep->last_flush_time);
 		ep->flush_byte_count = 0;
+		ep->curr_time_limit = lower_flush_time;
+		ep->curr_byte_threshold = lower_byte_limit;
 	} else {
 		getnstimeofday(&(curr_time));
-		diff = timespec_sub(curr_time, ep->flush_time);
+		diff = timespec_sub(curr_time, ep->last_flush_time);
 		ep->flush_byte_count += skb_size;
 
 		if (dynamic_gro_on) {
 			if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
-					gro_flush_time) &&
+					ep->curr_time_limit) &&
 					ep->flush_byte_count >=
-					gro_min_byte_thresh) {
+					ep->curr_byte_threshold) {
 				/* Processed many bytes in a small time window.
 				 * No longer need to flush so often and we can
 				 * increase our byte limit
 				 */
-				gro_flush_time = upper_flush_time;
-				gro_min_byte_thresh = upper_byte_limit;
+				ep->curr_time_limit = upper_flush_time;
+				ep->curr_byte_threshold = upper_byte_limit;
 			} else if ((diff.tv_sec > 0 ||
-					diff.tv_nsec > gro_flush_time) &&
+					diff.tv_nsec > ep->curr_time_limit) &&
 					ep->flush_byte_count <
-					gro_min_byte_thresh) {
+					ep->curr_byte_threshold) {
 				/* We have not hit our time limit and we are not
 				 * receive many bytes. Demote ourselves to the
 				 * lowest limits and flush
 				 */
 				napi_gro_flush(napi, false);
-				getnstimeofday(&ep->flush_time);
+				ep->last_flush_time = curr_time;
 				ep->flush_byte_count = 0;
-				gro_flush_time = 10000L;
-				gro_min_byte_thresh = 7500L;
+				ep->curr_time_limit = lower_flush_time;
+				ep->curr_byte_threshold = lower_byte_limit;
 			} else if ((diff.tv_sec > 0 ||
-					diff.tv_nsec > gro_flush_time) &&
+					diff.tv_nsec > ep->curr_time_limit) &&
 					ep->flush_byte_count >=
-					gro_min_byte_thresh) {
+					ep->curr_byte_threshold) {
 				/* Above byte and time limt, therefore we can
 				 * move/maintain our limits to be the max
 				 * and flush
 				 */
 				napi_gro_flush(napi, false);
-				getnstimeofday(&ep->flush_time);
+				ep->last_flush_time = curr_time;
 				ep->flush_byte_count = 0;
-				gro_flush_time = upper_flush_time;
-				gro_min_byte_thresh = upper_byte_limit;
+				ep->curr_time_limit = upper_flush_time;
+				ep->curr_byte_threshold = upper_byte_limit;
 			}
 			/* else, below time limit and below
 			 * byte thresh, so change nothing
 			 */
 		} else if (diff.tv_sec > 0 ||
-				diff.tv_nsec >= gro_flush_time) {
+				diff.tv_nsec >= lower_flush_time) {
 			napi_gro_flush(napi, false);
-			getnstimeofday(&ep->flush_time);
+			ep->last_flush_time = curr_time;
 			ep->flush_byte_count = 0;
 		}
 	}
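The rmnet_data hunk moves the GRO flush hysteresis into per-endpoint state (last_flush_time, flush_byte_count, curr_time_limit, curr_byte_threshold): heavy traffic inside the current time window promotes the endpoint to the upper time/byte limits, light traffic past the window flushes and demotes it back to the lower limits. Below is a slightly simplified userspace restatement of that decision logic; gro_flush() stands in for napi_gro_flush() and the limits reuse the module-parameter defaults.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define LOWER_FLUSH_NS   10000L	/* lower_flush_time default */
#define LOWER_BYTE_LIMIT 7500U	/* lower_byte_limit default */
#define UPPER_FLUSH_NS   15000L	/* upper_flush_time default */
#define UPPER_BYTE_LIMIT 10500U	/* upper_byte_limit default */

struct ep_gro_state {			/* mirrors the new per-ep fields */
	struct timespec last_flush_time;
	unsigned int flush_byte_count;
	long curr_time_limit;
	unsigned int curr_byte_threshold;
};

static void gro_flush(void)		/* stand-in for napi_gro_flush() */
{
	puts("flush");
}

static long ns_since(const struct timespec *then, const struct timespec *now)
{
	return (now->tv_sec - then->tv_sec) * 1000000000L +
	       (now->tv_nsec - then->tv_nsec);
}

static void dynamic_gro_update(struct ep_gro_state *ep, unsigned int skb_size)
{
	struct timespec now;
	bool window_open, many_bytes;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (ep->last_flush_time.tv_sec == 0) {	/* first packet on this ep */
		ep->last_flush_time = now;
		ep->flush_byte_count = 0;
		ep->curr_time_limit = LOWER_FLUSH_NS;
		ep->curr_byte_threshold = LOWER_BYTE_LIMIT;
		return;
	}

	ep->flush_byte_count += skb_size;
	window_open = ns_since(&ep->last_flush_time, &now) <= ep->curr_time_limit;
	many_bytes = ep->flush_byte_count >= ep->curr_byte_threshold;

	if (window_open && many_bytes) {
		/* Busy endpoint: flush less often, raise the byte threshold. */
		ep->curr_time_limit = UPPER_FLUSH_NS;
		ep->curr_byte_threshold = UPPER_BYTE_LIMIT;
	} else if (!window_open && !many_bytes) {
		/* Quiet endpoint: flush now, fall back to the lower limits. */
		gro_flush();
		ep->last_flush_time = now;
		ep->flush_byte_count = 0;
		ep->curr_time_limit = LOWER_FLUSH_NS;
		ep->curr_byte_threshold = LOWER_BYTE_LIMIT;
	} else if (!window_open && many_bytes) {
		/* Past the window but still busy: flush, keep the upper limits. */
		gro_flush();
		ep->last_flush_time = now;
		ep->flush_byte_count = 0;
		ep->curr_time_limit = UPPER_FLUSH_NS;
		ep->curr_byte_threshold = UPPER_BYTE_LIMIT;
	}
	/* else: window still open and below the byte threshold, change nothing */
}

int main(void)
{
	struct ep_gro_state ep = { { 0, 0 }, 0, 0, 0 };
	int i;

	for (i = 0; i < 5; i++)
		dynamic_gro_update(&ep, 4000);	/* pretend 4 kB frames arrive */
	return 0;
}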
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3dd7b21..d8387b1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9700,6 +9700,9 @@
 		if (err)
 			return err;
 
+		if (!setup.chandef.chan)
+			return -EINVAL;
+
 		err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
 					      &setup.beacon_rate);
 		if (err)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index bb7f5be..d414049 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -545,11 +545,6 @@
 		return -EOPNOTSUPP;
 
 	if (wdev->current_bss) {
-		if (!prev_bssid)
-			return -EALREADY;
-		if (prev_bssid &&
-		    !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
-			return -ENOTCONN;
 		cfg80211_unhold_bss(wdev->current_bss);
 		cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
 		wdev->current_bss = NULL;
@@ -1085,11 +1080,35 @@
 
 	ASSERT_WDEV_LOCK(wdev);
 
-	if (WARN_ON(wdev->connect_keys)) {
-		kzfree(wdev->connect_keys);
-		wdev->connect_keys = NULL;
+	/*
+	 * If we have an ssid_len, we're trying to connect or are
+	 * already connected, so reject a new SSID unless it's the
+	 * same (which is the case for re-association.)
+	 */
+	if (wdev->ssid_len &&
+	    (wdev->ssid_len != connect->ssid_len ||
+	     memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+		return -EALREADY;
+
+	/*
+	 * If connected, reject (re-)association unless prev_bssid
+	 * matches the current BSSID.
+	 */
+	if (wdev->current_bss) {
+		if (!prev_bssid)
+			return -EALREADY;
+		if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+			return -ENOTCONN;
 	}
 
+	/*
+	 * Reject if we're in the process of connecting with WEP,
+	 * this case isn't very interesting and trying to handle
+	 * it would make the code much more complex.
+	 */
+	if (wdev->connect_keys)
+		return -EINPROGRESS;
+
 	cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
 				  rdev->wiphy.ht_capa_mod_mask);
 
@@ -1140,7 +1159,12 @@
 
 	if (err) {
 		wdev->connect_keys = NULL;
-		wdev->ssid_len = 0;
+		/*
+		 * This could be reassoc getting refused, don't clear
+		 * ssid_len in that case.
+		 */
+		if (!wdev->current_bss)
+			wdev->ssid_len = 0;
 		return err;
 	}
 
@@ -1165,5 +1189,13 @@
 	else if (wdev->current_bss)
 		err = rdev_disconnect(rdev, dev, reason);
 
+	/*
+	 * Clear ssid_len unless we actually were fully connected,
+	 * in which case cfg80211_disconnected() will take care of
+	 * this later.
+	 */
+	if (!wdev->current_bss)
+		wdev->ssid_len = 0;
+
 	return err;
 }
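The sme.c changes concentrate the connect/reassociation sanity checks at the top of cfg80211_connect(): a different SSID while connecting or connected is rejected, reassociation must name the current BSS via prev_bssid, and a pending WEP connect blocks a new attempt. A condensed restatement of those checks as a standalone function, with simplified stand-in types; connect_precheck() and struct wdev_state are illustrative names only.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Condensed restatement of the new pre-checks, with simplified stand-in
 * types instead of struct wireless_dev / cfg80211_connect_params.
 */
struct wdev_state {
	size_t ssid_len;		/* non-zero: connecting or connected */
	unsigned char ssid[32];
	bool connected;			/* wdev->current_bss != NULL */
	unsigned char bssid[6];		/* current BSSID when connected */
	bool wep_connect_pending;	/* wdev->connect_keys != NULL */
};

static int connect_precheck(const struct wdev_state *w,
			    const unsigned char *ssid, size_t ssid_len,
			    const unsigned char *prev_bssid)
{
	/* A different SSID while connecting/connected is rejected outright. */
	if (w->ssid_len &&
	    (w->ssid_len != ssid_len || memcmp(w->ssid, ssid, w->ssid_len)))
		return -EALREADY;

	/* Re-association must name the BSS we are currently on. */
	if (w->connected) {
		if (!prev_bssid)
			return -EALREADY;
		if (memcmp(prev_bssid, w->bssid, 6))
			return -ENOTCONN;
	}

	/* A WEP connect attempt is still pending: don't start another one. */
	if (w->wep_connect_pending)
		return -EINPROGRESS;

	return 0;
}

int main(void)
{
	struct wdev_state w = {
		.ssid_len = 4, .ssid = "home", .connected = true,
		.bssid = { 0, 1, 2, 3, 4, 5 },
	};

	/* Reassociation to the same SSID/BSS passes, a new SSID does not. */
	return (connect_precheck(&w, (const unsigned char *)"home", 4, w.bssid) == 0 &&
		connect_precheck(&w, (const unsigned char *)"work", 4, w.bssid) == -EALREADY)
		? 0 : 1;
}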
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ed4f571..2cade02 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1664,32 +1664,34 @@
 
 static int xfrm_dump_policy_done(struct netlink_callback *cb)
 {
-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
 	struct net *net = sock_net(cb->skb->sk);
 
 	xfrm_policy_walk_done(walk, net);
 	return 0;
 }
 
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+	return 0;
+}
+
 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
 	struct xfrm_dump_info info;
 
-	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
-		     sizeof(cb->args) - sizeof(cb->args[0]));
-
 	info.in_skb = cb->skb;
 	info.out_skb = skb;
 	info.nlmsg_seq = cb->nlh->nlmsg_seq;
 	info.nlmsg_flags = NLM_F_MULTI;
 
-	if (!cb->args[0]) {
-		cb->args[0] = 1;
-		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
-	}
-
 	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
 
 	return skb->len;
@@ -2437,6 +2439,7 @@
 
 static const struct xfrm_link {
 	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+	int (*start)(struct netlink_callback *);
 	int (*dump)(struct sk_buff *, struct netlink_callback *);
 	int (*done)(struct netlink_callback *);
 	const struct nla_policy *nla_pol;
@@ -2450,6 +2453,7 @@
 	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
 	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
 	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+						   .start = xfrm_dump_policy_start,
 						   .dump = xfrm_dump_policy,
 						   .done = xfrm_dump_policy_done },
 	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2501,6 +2505,7 @@
 
 		{
 			struct netlink_dump_control c = {
+				.start = link->start,
 				.dump = link->dump,
 				.done = link->done,
 			};
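The xfrm_user.c hunk stores the policy walker directly in cb->args and initializes it in a new netlink .start callback, instead of lazily in .dump guarded by cb->args[0], so the walker is registered exactly once per dump and torn down reliably in .done. A toy userspace model of that start/dump/done contract follows, assuming a fixed-size args scratch area; the types and names are illustrative, not the netlink API.

#include <stdio.h>
#include <string.h>

/* A miniature version of the netlink dump contract: state lives in a small
 * args[] scratch area, .start runs exactly once, .dump may run many times,
 * .done runs at the end.  Names are illustrative, not the kernel API.
 */
struct callback {
	long args[6];			/* like netlink_callback::args */
};

struct walk_state {
	int next;			/* cursor into the data set */
};

static const int dataset[] = { 10, 20, 30, 40, 50, 60, 70 };

static int dump_start(struct callback *cb)
{
	struct walk_state *walk = (struct walk_state *)cb->args;

	/* Must fit in the scratch area - the kernel checks this with
	 * BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)).
	 */
	memset(walk, 0, sizeof(*walk));
	return 0;
}

static int dump(struct callback *cb)
{
	struct walk_state *walk = (struct walk_state *)cb->args;
	int emitted = 0;

	/* Emit at most two entries per call, like one skb's worth of data. */
	while (walk->next < (int)(sizeof(dataset) / sizeof(dataset[0])) &&
	       emitted < 2) {
		printf("entry %d\n", dataset[walk->next++]);
		emitted++;
	}
	return emitted;			/* 0 means the dump is finished */
}

static int dump_done(struct callback *cb)
{
	(void)cb;			/* nothing to free in this sketch */
	return 0;
}

int main(void)
{
	struct callback cb = { { 0 } };

	dump_start(&cb);
	while (dump(&cb) > 0)
		;			/* userspace keeps asking until empty */
	return dump_done(&cb);
}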
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6f337f0..fe1d06d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@
 		break;
 	case 0x10ec0225:
 	case 0x10ec0233:
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 	case 0x10ec0282:
@@ -909,6 +910,7 @@
 	{ 0x10ec0275, 0x1028, 0, "ALC3260" },
 	{ 0x10ec0899, 0x1028, 0, "ALC3861" },
 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+	{ 0x10ec0236, 0x1028, 0, "ALC3204" },
 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
 	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
 	{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3694,6 +3696,7 @@
 		alc_process_coef_fw(codec, coef0255_1);
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		alc_process_coef_fw(codec, coef0255);
@@ -3777,6 +3780,7 @@
 
 
 	switch (codec->core.vendor_id) {
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3885,6 +3889,7 @@
 	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
@@ -3971,6 +3976,7 @@
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		break;
@@ -4064,6 +4070,7 @@
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		break;
@@ -4131,6 +4138,7 @@
 	};
 
 	switch (codec->core.vendor_id) {
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
@@ -4335,6 +4343,7 @@
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, alc255fw);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, alc256fw);
 		break;
@@ -5852,6 +5861,14 @@
 		ALC225_STANDARD_PINS,
 		{0x12, 0xb7a60130},
 		{0x1b, 0x90170110}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60140},
+		{0x14, 0x90170110},
+		{0x21, 0x02211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60140},
+		{0x14, 0x90170150},
+		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
@@ -6226,6 +6243,7 @@
 	case 0x10ec0255:
 		spec->codec_variant = ALC269_TYPE_ALC255;
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		spec->codec_variant = ALC269_TYPE_ALC256;
 		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
@@ -7205,6 +7223,7 @@
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),